--- /dev/null
+---
+name: Bug report
+about: Create a report to help us improve
+
+---
+
+<!-- **Are you in the right place?**
+1. For issues or feature requests, please create an issue in this repository.
+2. Did you already search the existing open issues for anything similar? -->
+
+
+**Bug Report**
+
+What happened:
+
+What you expected to happen:
+
+How to reproduce it (minimal and precise):
+<!-- Please describe the exact circumstances under which your bug can be reproduced. -->
+
+Share your group_vars files, your inventory, and the **full** ceph-ansible log.
+
+**Environment**:
+* OS (e.g. from /etc/os-release):
+* Kernel (e.g. `uname -a`):
+* Docker version if applicable (e.g. `docker version`):
+* Ansible version (e.g. `ansible-playbook --version`):
+* ceph-ansible version (e.g. `git head or tag or stable branch`):
+* Ceph version (e.g. `ceph -v`):
--- /dev/null
+---
+name: Feature request
+about: Suggest an idea for this project
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
--- /dev/null
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 30
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 7
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - do-not-close
+# Label to use when marking an issue as stale
+staleLabel: wontfix
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: false
\ No newline at end of file
--- /dev/null
+name: ansible-lint
+on: [pull_request]
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.8'
+ architecture: x64
+ - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint==4.3.7 'rich>=9.5.1,<11.0.0' netaddr
+      - run: ansible-lint -x 106,204,205,208 -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample dashboard.yml
+ - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample --syntax-check --list-tasks -vv
+ - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site-container.yml.sample --syntax-check --list-tasks -vv
+ - run: ansible-playbook -i ./tests/functional/all_daemons/hosts infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
--- /dev/null
+name: check-nbsp
+on: [pull_request]
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - run: if [[ -n $(grep --exclude-dir=.git -P "\xa0" -r .) ]]; then echo 'NBSP characters found'; exit 1; fi
--- /dev/null
+name: defaults
+on: [pull_request]
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - run: "${GITHUB_WORKSPACE}/tests/scripts/workflows/defaults.sh"
\ No newline at end of file
--- /dev/null
+name: flake8
+on:
+ pull_request:
+ paths:
+ - 'library/**.py'
+ - 'module_utils/**.py'
+ - 'plugins/filter/**.py'
+ - 'tests/conftest.py'
+ - 'tests/library/**.py'
+ - 'tests/module_utils/**.py'
+ - 'tests/plugins/filter/**.py'
+ - 'tests/functional/tests/**.py'
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ architecture: x64
+ - run: pip install flake8
+ - run: flake8 --max-line-length 160 ./library/ ./module_utils/ ./plugins/filter/ ./tests/library/ ./tests/module_utils/ ./tests/plugins/filter/ ./tests/conftest.py ./tests/functional/tests/
--- /dev/null
+name: pytest
+on:
+ pull_request:
+ paths:
+ - 'library/**.py'
+ - 'module_utils/**.py'
+ - 'plugins/filter/**.py'
+ - 'tests/library/**.py'
+ - 'tests/module_utils/**.py'
+ - 'tests/plugins/filter/**.py'
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: [3.6, 3.7, 3.8]
+ name: Python ${{ matrix.python-version }}
+ steps:
+ - uses: actions/checkout@v2
+ - name: Setup python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python-version }}
+ architecture: x64
+ - run: pip install -r tests/requirements.txt
+ - run: pytest --cov=library/ --cov=module_utils/ --cov=plugins/filter/ -vvvv tests/library/ tests/module_utils/ tests/plugins/filter/
+ env:
+ PYTHONPATH: "$PYTHONPATH:/home/runner/work/ceph-ansible/ceph-ansible/library:/home/runner/work/ceph-ansible/ceph-ansible/module_utils:/home/runner/work/ceph-ansible/ceph-ansible/plugins/filter:/home/runner/work/ceph-ansible/ceph-ansible"
--- /dev/null
+name: signed-off
+on: [pull_request]
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+ - run: "${GITHUB_WORKSPACE}/tests/scripts/workflows/signed-off.sh"
\ No newline at end of file
--- /dev/null
+.vagrant
+*.vdi
+*.keyring
+fetch/*
+/vagrant_variables.yml
+group_vars/all
+group_vars/mons
+group_vars/osds
+group_vars/mdss
+group_vars/rgws
+group_vars/*.yml
+*.DS_Store
+/*.yml
+*.pyc
+*.sw?
+.tox
+ceph-ansible.spec
+*.retry
+*.pytest_cache
+!.travis.yml
+!.mergify.yml
+!raw_install_python.yml
+!requirements.yml
--- /dev/null
+pull_request_rules:
+# Backports
+ - actions:
+ backport:
+ branches:
+ - stable-3.0
+ conditions:
+ - label=backport-stable-3.0
+ name: backport stable-3.0
+ - actions:
+ backport:
+ branches:
+ - stable-3.1
+ conditions:
+ - label=backport-stable-3.1
+ name: backport stable-3.1
+ - actions:
+ backport:
+ branches:
+ - stable-3.2
+ conditions:
+ - label=backport-stable-3.2
+ name: backport stable-3.2
+ - actions:
+ backport:
+ branches:
+ - stable-4.0
+ conditions:
+ - label=backport-stable-4.0
+ name: backport stable-4.0
+ - actions:
+ backport:
+ branches:
+ - stable-5.0
+ conditions:
+ - label=backport-stable-5.0
+ name: backport stable-5.0
--- /dev/null
+version: 2
+
+build:
+ os: "ubuntu-22.04"
+ tools:
+ python: "3.9"
\ No newline at end of file
--- /dev/null
+# Contributing to ceph-ansible
+
+1. Follow the [commit guidelines](#commit-guidelines)
+
+## Commit guidelines
+
+- All commits should have a subject and a body
+- The commit subject should briefly describe what the commit changes
+- The commit body should describe the problem addressed and the chosen solution
+ - What was the problem and solution? Why that solution? Were there alternative ideas?
+- Wrap commit subjects and bodies to 80 characters
+- Sign-off your commits
+- Add a best-effort scope designation to commit subjects. This could be a directory name, file name,
+ or the name of a logical grouping of code. Examples:
+ - library: add a placeholder module for the validate action plugin
+ - site.yml: combine validate play with fact gathering play
+ - rhcs: bump version to 3.0 for stable 3.1
+- Commits linked to an issue should reference it with:
+ - Fixes: #2653
+
+[Suggested reading.](https://chris.beams.io/posts/git-commit/)
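+
+Putting these guidelines together, a commit message might look like the following (the subject line and issue number reuse the examples above; the author identity is made up):
+
+```
+site.yml: combine validate play with fact gathering play
+
+Describe the problem being addressed and explain the chosen solution,
+including why it was picked over the alternatives considered.
+
+Fixes: #2653
+Signed-off-by: Jane Doe <jane@example.com>
+```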
+
+## Pull requests
+
+### Jenkins CI
+
+We use Jenkins to run several tests on each pull request.
+
+If you don't want to run a build for a particular pull request, because all you are changing is the
+README for example, add the text `[skip ci]` to the PR title.
+
+### Merging strategy
+
+Merging PRs is controlled by [mergify](https://mergify.io/) according to the following rules:
+
+- at least one approval from a maintainer
+- a SUCCESS from the CI pipeline "ceph-ansible PR Pipeline"
+
+If your work is not ready for review/merge, please request the DNM label via a comment or the title of your PR.
+This will prevent the engine from merging your pull request.
+
+### Backports (maintainers only)
+
+If you wish to see your work from 'master' backported to a stable branch, you can ping a maintainer
+who will set the backport label on your PR. Once the PR from master is merged, a backport PR will be created by mergify;
+if there is a cherry-pick conflict, you must resolve it by pulling the branch.
+
+**NEVER** push directly into a stable branch, **unless** the code from master has diverged so much that the files don't exist in the stable branch.
+If that happens, inform the maintainers of the reasons why you pushed directly into a stable branch; if the reason is invalid, maintainers will immediately close your pull request.
+
+## Good to know
+
+### Sample files
+
+The sample files we provide in `group_vars/` are versioned;
+they are a copy of what their respective `./roles/<role>/defaults/main.yml` contain.
+
+This means that if you are pushing a patch modifying one of these files:
+
+- `./roles/ceph-mds/defaults/main.yml`
+- `./roles/ceph-mgr/defaults/main.yml`
+- `./roles/ceph-fetch-keys/defaults/main.yml`
+- `./roles/ceph-rbd-mirror/defaults/main.yml`
+- `./roles/ceph-defaults/defaults/main.yml`
+- `./roles/ceph-osd/defaults/main.yml`
+- `./roles/ceph-nfs/defaults/main.yml`
+- `./roles/ceph-client/defaults/main.yml`
+- `./roles/ceph-common/defaults/main.yml`
+- `./roles/ceph-iscsi-gw/defaults/main.yml`
+- `./roles/ceph-mon/defaults/main.yml`
+- `./roles/ceph-rgw/defaults/main.yml`
+- `./roles/ceph-container-common/defaults/main.yml`
+- `./roles/ceph-common-coreos/defaults/main.yml`
+
+You will have to update the corresponding sample file; there is a script that does it for you.
+You must run `./generate_group_vars_sample.sh` before you commit your changes so that the content of these files stays consistent.
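+
+For example, after changing a role's defaults, a typical sequence might look like this (a sketch; the exact name of the regenerated sample file, `group_vars/osds.yml.sample` here, is an assumption):
+
+```bash
+# edit the role defaults
+vim roles/ceph-osd/defaults/main.yml
+# regenerate the sample files shipped in group_vars/
+./generate_group_vars_sample.sh
+# commit the role change together with the regenerated sample file
+git add roles/ceph-osd/defaults/main.yml group_vars/osds.yml.sample
+git commit -s
+```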
+
+### Keep your branch up-to-date
+
+Sometimes a pull request is subject to long discussion, reviews, and comments; meanwhile, `master`
+moves forward. Try to keep your branch rebased on master regularly to avoid huge merge conflicts.
+A regularly rebased branch is easier and quicker to merge.
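+
+A minimal sketch of that workflow, assuming the upstream repository is configured as a remote named `upstream` and your fork as `origin`:
+
+```bash
+# fetch the latest master and rebase your branch on top of it
+git fetch upstream
+git rebase upstream/master my-feature-branch
+# resolve any conflicts, then update the pull request
+git push --force-with-lease origin my-feature-branch
+```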
+
+### Organize your commits
+
+Do not split your commits unnecessarily; we often see pull requests with useless additional commits like
+"I'm addressing reviewer's comments". So, please, squash and/or amend them as much as possible.
+
+Similarly, split them when needed: if you are modifying several parts of ceph-ansible or pushing a large
+patch, you may have to split your commits properly so your work is easier to understand.
+Some recommendations:
+
+- one fix = one commit,
+- do not mix multiple topics in a single commit,
+- if your PR contains a large number of commits that are totally unrelated to each other, it should probably be split into several PRs.
+
+If you've broken your work up into a set of sequential changes and each commit passes the tests on its own, then that's fine.
+If you've got commits fixing typos or other problems introduced by previous commits in the same PR, then those should be squashed before merging.
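+
+A short sketch of how to do that (`HEAD~3` is only an illustration of how far back to rebase):
+
+```bash
+# interactively reorder and squash the last three commits
+git rebase -i HEAD~3
+# or fold the latest fix into the previous commit, keeping the sign-off
+git commit --amend -s
+```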
+
+If you are new to Git, these links might help:
+
+- [https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History)
+- [http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html)
\ No newline at end of file
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Makefile for constructing RPMs.
+# Try "make" (for SRPMS) or "make rpm"
+
+NAME = ceph-ansible
+
+# Set the RPM package NVR from "git describe".
+# Examples:
+#
+# A "git describe" value of "v2.2.0beta1" would create an NVR
+# "ceph-ansible-2.2.0-0.beta1.1.el8"
+#
+# A "git describe" value of "v2.2.0rc1" would create an NVR
+# "ceph-ansible-2.2.0-0.rc1.1.el8"
+#
+# A "git describe" value of "v2.2.0rc1-1-gc465f85" would create an NVR
+# "ceph-ansible-2.2.0-0.rc1.1.gc465f85.el8"
+#
+# A "git describe" value of "v2.2.0" creates an NVR
+# "ceph-ansible-2.2.0-1.el8"
+
+DIST ?= "el8"
+MOCK_CONFIG ?= "epel-8-x86_64"
+TAG := $(shell git describe --tags --abbrev=0 --match 'v*')
+VERSION := $(shell echo $(TAG) | sed 's/^v//')
+COMMIT := $(shell git rev-parse HEAD)
+SHORTCOMMIT := $(shell echo $(COMMIT) | cut -c1-7)
+RELEASE := $(shell git describe --tags --match 'v*' \
+ | sed 's/^v//' \
+ | sed 's/^[^-]*-//' \
+ | sed 's/-/./')
+ifeq ($(VERSION),$(RELEASE))
+ RELEASE = 1
+endif
+ifneq (,$(findstring alpha,$(VERSION)))
+ ALPHA := $(shell echo $(VERSION) | sed 's/.*alpha/alpha/')
+ RELEASE := 0.$(ALPHA).$(RELEASE)
+ VERSION := $(subst $(ALPHA),,$(VERSION))
+endif
+ifneq (,$(findstring beta,$(VERSION)))
+ BETA := $(shell echo $(VERSION) | sed 's/.*beta/beta/')
+ RELEASE := 0.$(BETA).$(RELEASE)
+ VERSION := $(subst $(BETA),,$(VERSION))
+endif
+ifneq (,$(findstring rc,$(VERSION)))
+ RC := $(shell echo $(VERSION) | sed 's/.*rc/rc/')
+ RELEASE := 0.$(RC).$(RELEASE)
+ VERSION := $(subst $(RC),,$(VERSION))
+endif
+
+ifneq (,$(shell echo $(VERSION) | grep [a-zA-Z]))
+ # If we still have alpha characters in our Git tag string, we don't know
+ # how to translate that into a sane RPM version/release. Bail out.
+ $(error cannot translate Git tag version $(VERSION) to an RPM NVR)
+endif
+
+NVR := $(NAME)-$(VERSION)-$(RELEASE).$(DIST)
+
+all: srpm
+
+# Testing only
+echo:
+ echo COMMIT $(COMMIT)
+ echo VERSION $(VERSION)
+ echo RELEASE $(RELEASE)
+ echo NVR $(NVR)
+
+clean:
+ rm -rf dist/
+ rm -rf ceph-ansible-$(VERSION)-$(SHORTCOMMIT).tar.gz
+ rm -rf $(NVR).src.rpm
+
+dist:
+ git archive --format=tar.gz --prefix=ceph-ansible-$(VERSION)/ HEAD > ceph-ansible-$(VERSION)-$(SHORTCOMMIT).tar.gz
+
+spec:
+ sed ceph-ansible.spec.in \
+ -e 's/@COMMIT@/$(COMMIT)/' \
+ -e 's/@VERSION@/$(VERSION)/' \
+ -e 's/@RELEASE@/$(RELEASE)/' \
+ > ceph-ansible.spec
+
+srpm: dist spec
+ rpmbuild -bs ceph-ansible.spec \
+ --define "_topdir ." \
+ --define "_sourcedir ." \
+ --define "_srcrpmdir ." \
+ --define "dist .$(DIST)"
+
+rpm: dist srpm
+ mock -r $(MOCK_CONFIG) rebuild $(NVR).src.rpm \
+ --resultdir=. \
+ --define "dist .$(DIST)"
+
+tag:
+ $(eval BRANCH := $(shell git rev-parse --abbrev-ref HEAD))
+ $(eval LASTNUM := $(shell echo $(TAG) \
+ | sed -E "s/.*[^0-9]([0-9]+)$$/\1/"))
+ $(eval NEXTNUM=$(shell echo $$(($(LASTNUM)+1))))
+ $(eval NEXTTAG=$(shell echo $(TAG) | sed "s/$(LASTNUM)$$/$(NEXTNUM)/"))
+	if [[ "$(TAG)" == "$$(git describe --tags --match 'v*')" ]]; then \
+ echo "$(SHORTCOMMIT) on $(BRANCH) is already tagged as $(TAG)"; \
+ exit 1; \
+ fi
+ if [[ "$(BRANCH)" != "master" ]] && \
+ ! [[ "$(BRANCH)" =~ ^stable- ]]; then \
+ echo Cannot tag $(BRANCH); \
+ exit 1; \
+ fi
+ @echo Tagging Git branch $(BRANCH)
+ git tag $(NEXTTAG)
+ @echo run \'git push origin $(NEXTTAG)\' to push to GitHub.
+
+.PHONY: dist rpm srpm tag
--- /dev/null
+# RGW Multisite
+
+This document contains directions for configuring RGW multisite in ceph-ansible.
+Multisite replication can be configured either over multiple Ceph clusters or in a single Ceph cluster to isolate RGWs from each other.
+
+The first two sections are refreshers on working with ansible inventory and RGW Multisite.
+The next 4 sections are instructions on deploying the following multisite scenarios:
+
+- Scenario #1: Single Realm with Multiple Ceph Clusters
+- Scenario #2: Single Ceph Cluster with Multiple Realms
+- Scenario #3: Multiple Realms over Multiple Ceph Clusters
+- Scenario #4: Multiple Realms over Multiple Ceph Clusters with Multiple Instances on a Host
+
+## Working with Ansible Inventory
+
+If you are familiar with basic ansible terminology, working with inventory files, and variable precedence, feel free to skip this section.
+
+### The Inventory File
+
+ceph-ansible starts up all the different daemons in a Ceph cluster.
+Each daemon (osd.0, mon.1, rgw.a) is given a line in the inventory file. Each line is called a **host** in ansible.
+Each type of daemon (osd, mon, rgw, mgr, etc.) is given a **group** with its respective daemons in the ansible inventory file.
+
+Here is an example of an inventory file (in .ini format) for a ceph cluster with 1 ceph-mgr, 2 rgws, 3 osds, and 2 mons:
+
+```ansible-inventory
+[mgrs]
+mgr-001 ansible_ssh_host=192.168.224.48 ansible_ssh_port=22
+
+[rgws]
+rgw-001 ansible_ssh_host=192.168.216.145 ansible_ssh_port=22 radosgw_address=192.168.216.145
+rgw-002 ansible_ssh_host=192.168.215.178 ansible_ssh_port=22 radosgw_address=192.168.215.178
+
+[osds]
+osd-001 ansible_ssh_host=192.168.230.196 ansible_ssh_port=22
+osd-002 ansible_ssh_host=192.168.226.21 ansible_ssh_port=22
+osd-003 ansible_ssh_host=192.168.176.118 ansible_ssh_port=22
+
+[mons]
+mon-001 ansible_ssh_host=192.168.210.155 ansible_ssh_port=22 monitor_address=192.168.210.155
+mon-002 ansible_ssh_host=192.168.179.111 ansible_ssh_port=22 monitor_address=192.168.179.111
+```
+
+Notice there are 4 groups defined here: mgrs, rgws, osds, mons.
+There is one host (mgr-001) in mgrs, 2 hosts (rgw-001, rgw-002) in rgws, 3 hosts (osd-001, osd-002, osd-003) in osds, and 2 hosts (mon-001, mon-002) in mons.
+
+### group_vars
+
+In the ceph-ansible tree there is a directory called `group_vars`. This directory has a collection of .yml files for variables set for each of the groups.
+The rgw multisite specific variables are defined in `all.yml`. This file has variables that apply to all groups in the inventory.
+When a variable is set in `group_vars/all.yml`, for example `rgw_realm: usa`, then `usa` will be the value of `rgw_realm` for all of the rgws.
+
+### host_vars
+
+If you want to set any of the variables defined in `group_vars` for a specific host you have two options.
+One option is to edit the line in the inventory file for the host you want to configure. In the above inventory each mon and rgw has a host specific variable for its address.
+
+The preferred option is to create a directory called `host_vars` at the root of the ceph-ansible tree.
+In `host_vars/` there can be files with the same name as the host (ex: osd-001, mgr-001, rgw-001) that set variables for each host.
+The values for the variables set in `host_vars` have a higher precedence than the values in `group_vars`.
+
+Consider the following file, `host_vars/rgw-001`:
+
+```yaml
+rgw_realm: usa
+rgw_zonegroup: alaska
+rgw_zone: juneau
+
+rgw_zonemaster: true
+rgw_zonesecondary: false
+system_access_key: alaskaaccesskey
+system_secret_key: alaskasecretkey
+```
+
+Even if `rgw_realm` is set to `france` in `group_vars/all.yml`, `rgw_realm` will evaluate to `usa` for tasks run on `rgw-001`.
+This is because Ansible gives higher precedence to the values set in `host_vars` over `group_vars`.
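+
+If you want to double-check which value a host will end up with, an ad-hoc call to the `debug` module is a quick way to do it (the inventory path is whatever you use for your cluster):
+
+```bash
+# prints the value of rgw_realm as resolved for rgw-001, i.e. "usa" in this example
+ansible -i <your-inventory> rgw-001 -m debug -a "var=rgw_realm"
+```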
+
+For more information on working with inventory in Ansible please visit: <https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html>.
+
+## Brief Multisite Overview
+
+### RGW Multisite terminology
+
+If you are familiar with RGW multisite in detail, feel free to skip this section.
+
+Rados gateways (RGWs) in multisite replication are grouped into zones.
+
+A group of 1 or more RGWs can be grouped into a **zone**.\
+A group of 1 or more zones can be grouped into a **zonegroup**.\
+A group of 1 or more zonegroups can be grouped into a **realm**.\
+A Ceph **cluster** in multisite has 1 or more rgws that use the same backend OSDs.
+
+There can be multiple clusters in one realm, multiple realms in a single cluster, or multiple realms over multiple clusters.
+
+### RGW Realms
+
+A realm allows the RGWs inside of it to be independent and isolated from RGWs outside of the realm. A realm contains one or more zonegroups.
+
+Realms can contain 1 or more clusters. There can also be more than 1 realm in a cluster.
+
+### RGW Zonegroups
+
+Similar to zones, a zonegroup can be either a **master zonegroup** or a **secondary zonegroup**.
+
+`rgw_zonegroupmaster` specifies whether the zonegroup will be the master zonegroup in a realm.
+There can only be one master zonegroup per realm. There can be any number of secondary zonegroups in a realm.
+Zonegroups that are not master must have `rgw_zonegroupmaster` set to false.
+
+### RGW Zones
+
+A zone is a collection of RGW daemons. A zone can be either a **master zone** or a **secondary zone**.
+
+`rgw_zonemaster` specifies that the zone will be the master zone in a zonegroup.
+`rgw_zonesecondary` specifies that the zone will be a secondary zone in a zonegroup.
+Both `rgw_zonemaster` and `rgw_zonesecondary` need to be defined. They cannot have the same value.
+
+A secondary zone pulls a realm in order to sync data to it.
+
+Finally, the variable `rgw_zone` is set to "default" to enable compression for clusters configured without rgw multisite.
+If multisite is configured, `rgw_zone` should not be set to "default".
+
+For more detailed information on multisite, please visit: <https://docs.ceph.com/docs/master/radosgw/multisite/>.
+
+## Deployment Scenario #1: Single Realm & Zonegroup with Multiple Ceph Clusters
+
+### Requirements
+
+* At least 2 Ceph clusters
+* 1 RGW per cluster
+* Jewel or newer
+
+### Configuring the Master Zone in the Primary Cluster
+
+This will set up a realm, a master zonegroup, and a master zone in the Ceph cluster.
+Since there is only 1 realm, 1 zonegroup, and 1 zone for all the rgw hosts, only `group_vars/all.yml` needs to be edited for multisite configuration.
+If more than one rgw is being deployed in this configuration, all of the rgws will be added to the master zone.
+
+1. Generate System Access and System Secret Keys
+
+ ```bash
+ echo system_access_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) > multi-site-keys.txt
+ echo system_secret_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1) >> multi-site-keys.txt
+ ```
+
+2. Edit `group_vars/all.yml` for the 1st cluster
+
+ ```yaml
+ rgw_multisite: true
+
+ rgw_zone: juneau
+ rgw_zonegroup: alaska
+ rgw_realm: usa
+
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+
+ rgw_zonegroupmaster: true
+
+ rgw_multisite_proto: http
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+
+ system_access_key: 6kWkikvapSnHyE22P7nO
+ system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
+ ```
+
+ **Note:** `rgw_zonemaster` should have the value of `true` and `rgw_zonesecondary` should be `false`. Both values always need to be defined when running multisite.
+
+ **Note:** replace the `system_access_key` and `system_secret_key` values with the ones you generated.
+
+3. Run the ceph-ansible playbook for the 1st cluster
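+
+   For example, assuming you have copied `site.yml.sample` to `site.yml` and `<your-inventory>` points at the inventory file for this cluster:
+
+   ```bash
+   ansible-playbook -i <your-inventory> site.yml
+   ```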
+
+### Configuring the Secondary Zone in a Separate Cluster
+
+This will pull the realm from the primary cluster and set up a secondary zone in the secondary Ceph cluster.
+Since there is only 1 realm, 1 zonegroup, and 1 zone for all the rgw hosts, only `group_vars/all.yml` needs to be edited for multisite configuration.
+If more than one rgw is being deployed in this configuration, all of the rgws will be added to the secondary zone.
+
+1. Edit `group_vars/all.yml` for the 2nd cluster
+
+ ```yaml
+ rgw_multisite: true
+
+ rgw_zone: fairbanks
+ rgw_zonegroup: alaska
+ rgw_realm: usa
+
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+
+ rgw_zonegroupmaster: true
+
+ rgw_multisite_proto: http
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+
+ system_access_key: 6kWkikvapSnHyE22P7nO
+ system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
+
+ rgw_pull_proto: http
+ rgw_pull_port: 8080
+ rgw_pullhost: rgw-001-hostname
+ ```
+
+ **Note:** `rgw_zonemaster` should have the value of `false` and `rgw_zonesecondary` should be `true`
+
+   **Note:** The variables `rgw_pull_proto`, `rgw_pullhost`, and `rgw_pull_port` are joined together to make the endpoint string needed to create secondary zones; with the values above this endpoint would be `http://rgw-001-hostname:8080`. This endpoint is that of one of the RGWs in the master zone of the zonegroup and realm you want to create secondary zones in. It **must be resolvable** from the mons and rgws in the cluster the secondary zone(s) are being created in.
+
+   **Note:** `system_access_key` and `system_secret_key` should match what you used in the primary cluster.
+
+2. Run the ceph-ansible playbook on your 2nd cluster
+
+### Conclusion
+
+You should now have a master zone on cluster0 and a secondary zone on cluster1 in an Active-Active mode.
+
+## Deployment Scenario #2: Single Ceph Cluster with Multiple Realms
+
+### Requirements
+
+* Jewel or newer
+
+### Configuring Multiple Realms in a Single Cluster
+
+This configuration will deploy a single Ceph cluster with multiple realms.
+Each of the rgws in the inventory should have a file in `host_vars` where the realm, zone, and zonegroup can be set for the rgw along with other variables.
+
+1. Generate System Access and System Secret Keys for each realm
+
+ ```bash
+ echo system_access_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) > multi-site-keys-realm-1.txt
+ echo system_secret_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1) >> multi-site-keys-realm-1.txt
+
+ echo system_access_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) > multi-site-keys-realm-2.txt
+ echo system_secret_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1) >> multi-site-keys-realm-2.txt
+ ```
+
+2. Edit `group_vars/all.yml` for the cluster
+
+ ```yaml
+ rgw_multisite: true
+ ```
+
+   As previously learned, all values set here will be set on all rgw hosts. `rgw_multisite` must be set to `true` for all rgw hosts so the multisite playbooks can run on all rgws.
+
+3. Create & edit files in `host_vars/` to create realms, zonegroups, and master zones.
+
+   Here is an example of the file `host_vars/rgw-001` for the `rgw-001` entry in the `[rgws]` section of the example ansible inventory.
+
+ ```yaml
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+ rgw_zonegroupmaster: true
+ rgw_multisite_proto: http
+ rgw_realm: france
+ rgw_zonegroup: idf
+ rgw_zone: paris
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+ ```
+
+   Here is an example of the file `host_vars/rgw-002` for the `rgw-002` entry in the `[rgws]` section of the example ansible inventory.
+
+ ```yaml
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+ rgw_zonegroupmaster: true
+ rgw_multisite_proto: http
+ rgw_realm: usa
+ rgw_zonegroup: alaska
+ rgw_zone: juneau
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+ ```
+
+ **Note:** Since `rgw_realm`, `rgw_zonegroup`, and `rgw_zone` differ between files, a new realm, zonegroup, and master zone are created containing rgw-001 and rgw-002 respectively.
+
+ **Note:** `rgw_zonegroupmaster` is set to true in each of the files since it will be the only zonegroup in each realm.
+
+ **Note:** `rgw_zonemaster` should have the value of `true` and `rgw_zonesecondary` should be `false`.
+
+ **Note:** replace the `system_access_key` and `system_secret_key` values with the ones you generated.
+
+4. Run the ceph-ansible playbook on your cluster
+
+### Conclusion
+
+The RGWs in the deployed cluster will be split up into 2 realms: `france` and `usa`. France has a zonegroup named `idf` and usa has one called `alaska`.
+`idf` has a master zone called `paris`. `alaska` has a master zone called `juneau`.
+
+## Deployment Scenario #3: Multiple Realms over Multiple Ceph Clusters
+
+The multisite playbooks in ceph-ansible are flexible enough to create many realms, zonegroups, and zones that span many clusters.
+
+A multisite configuration consisting of multiple realms across multiple clusters can be configured by having files in `host_vars` for the rgws in each cluster similar to scenario #2.
+
+The host_vars for the rgws in the second cluster would have `rgw_zonesecondary` set to true and the additional `rgw_pull` variables, as seen in the secondary cluster configuration of scenario #1.
+
+The inventory for the rgws section of the master cluster for this example looks like:
+
+```ansible-inventory
+[rgws]
+rgw-001 ansible_ssh_host=192.168.216.145 ansible_ssh_port=22 radosgw_address=192.168.216.145
+rgw-002 ansible_ssh_host=192.168.215.178 ansible_ssh_port=22 radosgw_address=192.168.215.178
+```
+
+The inventory for the rgws section of the secondary cluster for this example looks like:
+
+```ansible-inventory
+[rgws]
+rgw-003 ansible_ssh_host=192.168.215.178 ansible_ssh_port=22 radosgw_address=192.168.215.199
+rgw-004 ansible_ssh_host=192.168.215.178 ansible_ssh_port=22 radosgw_address=192.168.194.109
+```
+
+### Requirements
+
+* At least 2 Ceph clusters
+* At least 2 RGWs in the master cluster and in each secondary cluster
+* Jewel or newer
+
+1. Generate System Access and System Secret Keys for each realm
+
+ ```bash
+ echo system_access_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) > multi-site-keys-realm-1.txt
+ echo system_secret_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1) >> multi-site-keys-realm-1.txt
+
+ echo system_access_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) > multi-site-keys-realm-2.txt
+ echo system_secret_key: $(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 40 | head -n 1) >> multi-site-keys-realm-2.txt
+
+ ...
+ ```
+
+2. Edit `group_vars/all.yml` for the cluster
+
+ ```yaml
+ rgw_multisite: true
+ ```
+
+ As per the previous example, all values set here will be set on all rgw hosts.
+
+3. Create & edit files in `host_vars/` to create realms, zonegroups, and master zones on cluster #1.
+
+   Here is an example of the file `host_vars/rgw-001` for the master cluster.
+
+ ```yaml
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+ rgw_zonegroupmaster: true
+ rgw_multisite_proto: http
+ rgw_realm: france
+ rgw_zonegroup: idf
+ rgw_zone: paris
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+ ```
+
+   Here is an example of the file `host_vars/rgw-002` for the master cluster.
+
+ ```yaml
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+ rgw_zonegroupmaster: true
+ rgw_multisite_proto: http
+ rgw_realm: usa
+ rgw_zonegroup: alaska
+ rgw_zone: juneau
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+ ```
+
+4. Run the ceph-ansible playbook on your master cluster.
+
+5. Create & edit files in `host_vars/` for the entries in the `[rgws]` section of the inventory on the secondary cluster.
+
+ Here is an example of the file `host_vars/rgw-003` for the `rgw-003` entry in the `[rgws]` section for a secondary cluster.
+
+ ```yaml
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: true
+ rgw_multisite_proto: http
+ rgw_realm: france
+ rgw_zonegroup: idf
+ rgw_zone: versailles
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+ rgw_pull_proto: http
+ rgw_pull_port: 8080
+ rgw_pullhost: rgw-001-hostname
+ ```
+
+ Here is an example of the file `host_vars/rgw-004` for the `rgw-004` entry in the `[rgws]` section for a secondary cluster.
+
+ ```yaml
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: true
+ rgw_multisite_proto: http
+ rgw_realm: usa
+ rgw_zonegroup: alaska
+ rgw_zone: juneau
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+ rgw_pull_proto: http
+ rgw_pull_port: 8080
+ rgw_pullhost: rgw-002-hostname
+ ```
+
+6. Run the ceph-ansible playbook on your secondary cluster.
+
+### Conclusion
+
+There will be 2 realms in this configuration, `france` and `usa`, with RGWs and RGW zones in both clusters. Cluster0 has the master zones and Cluster1 has the secondary zones.
+
+Data in realm france will be replicated over both clusters and will remain isolated from rgws in realm usa, and vice versa.
+
+## Deployment Scenario #4: Multiple Realms over Multiple Ceph Clusters with Multiple Instances
+
+More than 1 RGW can be running on a single host. To configure multisite for a host running more than one rgw instance, `rgw_instances` must be configured.
+
+Each item in `rgw_instances` (declared in a host_vars file) represents an RGW on that host. Each item contains the multisite configuration for that RGW.
+
+Here is an example:
+
+```yaml
+rgw_instances:
+ - instance_name: rgw1
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+ rgw_zonegroupmaster: true
+ rgw_realm: usa
+ rgw_zonegroup: alaska
+ rgw_zone: juneau
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_multisite_proto: http
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+```
+
+### Setting rgw_instances for a host in the master zone
+
+Here is an example of a host_vars for a host (ex: rgw-001 in the examples) containing 2 rgw_instances:
+
+```yaml
+rgw_instances:
+ - instance_name: rgw1
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+ rgw_zonegroupmaster: true
+ rgw_realm: usa
+ rgw_zonegroup: alaska
+ rgw_zone: juneau
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_multisite_proto: http
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+ - instance_name: rgw2
+ rgw_zonemaster: true
+ rgw_zonesecondary: false
+ rgw_zonegroupmaster: true
+ rgw_realm: france
+ rgw_zonegroup: idf
+ rgw_zone: paris
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_multisite_proto: http
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+```
+
+This example starts up 2 rgws on host rgw-001. `rgw1` is configured to be in realm usa and `rgw2` is configured to be in realm france.
+
+**Note:** The old format of declaring `rgw_zonemaster`, `rgw_zonesecondary`, `rgw_zonegroupmaster`, `rgw_multisite_proto` outside of `rgw_instances` still works but declaring the values at the instance level (as seen above) is preferred.
+
+### Setting rgw_instances for a host in a secondary zone
+
+To start up multiple rgws on a host that are in a secondary zone, `endpoint` must be added to rgw_instances.
+
+The value of `endpoint` should be the endpoint of an RGW in the master zone of the realm that is resolvable from the host. `rgw_pull_{proto, host, port}` are not necessary since `endpoint` is a combination of all three.
+
+Here is an example of a host_vars for a host containing 2 rgw_instances in a secondary zone:
+
+```yaml
+rgw_instances:
+ - instance_name: rgw3
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: true
+ rgw_realm: usa
+ rgw_zonegroup: alaska
+ rgw_zone: fairbanks
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_multisite_proto: "http"
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+    endpoint: http://rgw-001-hostname:8080
+ - instance_name: rgw4
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: true
+ rgw_realm: france
+ rgw_zonegroup: idf
+ rgw_zone: versailles
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_multisite_proto: "http"
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+    endpoint: http://rgw-001-hostname:8081
+```
+
+This example starts up 2 rgws on the host that will pull the realm from the rgws on rgw-001 above. `rgw3` is pulling from the rgw endpoint in realm usa in the master zone example above (instance name rgw1). `rgw4` is pulling from the rgw endpoint in realm france in the master zone example above (instance name rgw2).
+
+**Note:** The old format of declaring `rgw_zonemaster`, `rgw_zonesecondary`, `rgw_zonegroupmaster`, `rgw_multisite_proto` outside of `rgw_instances` still works but declaring the values at the instance level (as seen above) is preferred.
+
+### Conclusion
+
+`rgw_instances` can be used in host_vars for multisite deployments like those in scenarios #2 and #3.
+
--- /dev/null
+ceph-ansible
+============
+Ansible playbooks for Ceph, the distributed filesystem.
+
+Please refer to our hosted documentation here: https://docs.ceph.com/projects/ceph-ansible/en/latest/
+
+You can view documentation for our ``stable-*`` branches by substituting ``master`` in the link
+above for the name of the branch. For example: https://docs.ceph.com/projects/ceph-ansible/en/stable-5.0/
--- /dev/null
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require 'yaml'
+VAGRANTFILE_API_VERSION = '2'
+
+if File.file?(File.join(File.dirname(__FILE__), 'vagrant_variables.yml')) then
+ vagrant_variables_file = 'vagrant_variables.yml'
+else
+ vagrant_variables_file = 'vagrant_variables.yml.sample'
+end
+
+config_file=File.expand_path(File.join(File.dirname(__FILE__), vagrant_variables_file))
+
+settings=YAML.load_file(config_file)
+
+LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
+NMONS = settings['mon_vms']
+NOSDS = settings['osd_vms']
+NMDSS = settings['mds_vms']
+NRGWS = settings['rgw_vms']
+NNFSS = settings['nfs_vms']
+GRAFANA = settings['grafana_server_vms']
+NRBD_MIRRORS = settings['rbd_mirror_vms']
+CLIENTS = settings['client_vms']
+NISCSI_GWS = settings['iscsi_gw_vms']
+MGRS = settings['mgr_vms']
+PUBLIC_SUBNET = settings['public_subnet']
+CLUSTER_SUBNET = settings['cluster_subnet']
+BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['vagrant_box']
+CLIENT_BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['client_vagrant_box'] || BOX
+BOX_URL = ENV['CEPH_ANSIBLE_VAGRANT_BOX_URL'] || settings['vagrant_box_url']
+SYNC_DIR = settings['vagrant_sync_dir']
+MEMORY = settings['memory']
+ETH = settings['eth']
+DOCKER = settings['docker']
+USER = settings['ssh_username']
+DEBUG = settings['debug']
+
+ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
+DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
+
+$last_ip_pub_digit = 9
+$last_ip_cluster_digit = 9
+
+ansible_provision = proc do |ansible|
+ if DOCKER then
+ ansible.playbook = 'site-container.yml'
+ if settings['skip_tags']
+ ansible.skip_tags = settings['skip_tags']
+ end
+ else
+ ansible.playbook = 'site.yml'
+ end
+
+ # Note: Can't do ranges like mon[0-2] in groups because
+ # these aren't supported by Vagrant, see
+ # https://github.com/mitchellh/vagrant/issues/3539
+ ansible.groups = {
+ 'mons' => (0..NMONS - 1).map { |j| "#{LABEL_PREFIX}mon#{j}" },
+ 'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
+ 'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
+ 'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
+ 'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
+ 'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
+ 'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
+ 'iscsigws' => (0..NISCSI_GWS - 1).map { |j| "#{LABEL_PREFIX}iscsi_gw#{j}" },
+ 'mgrs' => (0..MGRS - 1).map { |j| "#{LABEL_PREFIX}mgr#{j}" },
+ 'monitoring' => (0..GRAFANA - 1).map { |j| "#{LABEL_PREFIX}grafana#{j}" }
+ }
+
+ ansible.extra_vars = {
+ cluster_network: "#{CLUSTER_SUBNET}.0/24",
+ journal_size: 100,
+ public_network: "#{PUBLIC_SUBNET}.0/24",
+ }
+
+ # In a production deployment, these should be secret
+ if DOCKER then
+ ansible.extra_vars = ansible.extra_vars.merge({
+ containerized_deployment: 'true',
+ monitor_interface: ETH,
+ ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
+ devices: settings['disks'],
+ radosgw_interface: ETH,
+ generate_fsid: 'true',
+ })
+ else
+ ansible.extra_vars = ansible.extra_vars.merge({
+ devices: settings['disks'],
+ monitor_interface: ETH,
+ radosgw_interface: ETH,
+ os_tuning_params: settings['os_tuning_params'],
+ })
+ end
+
+ if BOX == 'linode' then
+ ansible.sudo = true
+ # Use monitor_address_block instead of monitor_interface:
+ ansible.extra_vars.delete(:monitor_interface)
+ # Use radosgw_address_block instead of radosgw_interface:
+ ansible.extra_vars.delete(:radosgw_interface)
+ ansible.extra_vars = ansible.extra_vars.merge({
+ cluster_network: "#{CLUSTER_SUBNET}.0/16",
+ devices: ['/dev/sdc'], # hardcode leftover disk
+ monitor_address_block: "#{PUBLIC_SUBNET}.0/16",
+ radosgw_address_block: "#{PUBLIC_SUBNET}.0/16",
+ public_network: "#{PUBLIC_SUBNET}.0/16",
+ })
+ end
+
+ if DEBUG then
+ ansible.verbose = '-vvvv'
+ end
+ ansible.limit = 'all'
+end
+
+def create_vmdk(name, size)
+ dir = Pathname.new(__FILE__).expand_path.dirname
+ path = File.join(dir, '.vagrant', name + '.vmdk')
+ `vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
+ 2>&1 > /dev/null` unless File.exist?(path)
+end
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = BOX
+ config.vm.box_url = BOX_URL
+ config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
+ config.ssh.private_key_path = settings['ssh_private_key_path']
+ config.ssh.username = USER
+
+ # When using libvirt, avoid errors like:
+ # "host doesn't support requested feature: CPUID.01H:EDX.ds [bit 21]"
+ config.vm.provider :libvirt do |lv|
+ lv.cpu_mode = 'host-passthrough'
+ lv.volume_cache = 'unsafe'
+ lv.graphics_type = 'none'
+ lv.cpus = 2
+ end
+
+ # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
+ if DISABLE_SYNCED_FOLDER
+ config.vm.provider :virtualbox do |v,override|
+ override.vm.synced_folder '.', SYNC_DIR, disabled: true
+ end
+ config.vm.provider :libvirt do |v,override|
+ override.vm.synced_folder '.', SYNC_DIR, disabled: true
+ end
+ end
+
+ if BOX == 'openstack'
+ # OpenStack VMs
+ config.vm.provider :openstack do |os|
+ config.vm.synced_folder ".", "/home/#{USER}/vagrant", disabled: true
+ config.ssh.pty = true
+ os.openstack_auth_url = settings['os_openstack_auth_url']
+ os.username = settings['os_username']
+ os.password = settings['os_password']
+ os.tenant_name = settings['os_tenant_name']
+ os.region = settings['os_region']
+ os.flavor = settings['os_flavor']
+ os.image = settings['os_image']
+ os.keypair_name = settings['os_keypair_name']
+ os.security_groups = ['default']
+
+ if settings['os_networks'] then
+ os.networks = settings['os_networks']
+ end
+
+ if settings['os_floating_ip_pool'] then
+ os.floating_ip_pool = settings['os_floating_ip_pool']
+ end
+
+ config.vm.provision "shell", inline: "true", upload_path: "/home/#{USER}/vagrant-shell"
+ end
+ elsif BOX == 'linode'
+ config.vm.provider :linode do |provider, override|
+ provider.token = ENV['LINODE_API_KEY']
+ provider.distribution = settings['cloud_distribution'] # 'Ubuntu 16.04 LTS'
+ provider.datacenter = settings['cloud_datacenter']
+ provider.plan = MEMORY.to_s
+ provider.private_networking = true
+ # root install generally takes <1GB
+ provider.xvda_size = 4*1024
+ # add some swap as the Linode distros require it
+ provider.swap_size = 128
+ end
+ end
+
+ (0..NMONS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
+ mon.vm.hostname = "#{LABEL_PREFIX}mon#{i}"
+ if ASSIGN_STATIC_IP
+ mon.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+ # Virtualbox
+ mon.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ mon.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ mon.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+
+ # Parallels
+ mon.vm.provider "parallels" do |prl|
+ prl.name = "ceph-mon#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ mon.vm.provider :linode do |provider|
+ provider.label = mon.vm.hostname
+ end
+ end
+ end
+
+ (0..GRAFANA - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}grafana#{i}" do |grf|
+ grf.vm.hostname = "#{LABEL_PREFIX}grafana#{i}"
+ if ASSIGN_STATIC_IP
+ grf.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+ # Virtualbox
+ grf.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ grf.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ grf.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+
+ # Parallels
+ grf.vm.provider "parallels" do |prl|
+ prl.name = "ceph-grafana#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ grf.vm.provider :linode do |provider|
+ provider.label = grf.vm.hostname
+ end
+ end
+ end
+
+ (0..MGRS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}mgr#{i}" do |mgr|
+ mgr.vm.hostname = "#{LABEL_PREFIX}mgr#{i}"
+ if ASSIGN_STATIC_IP
+ mgr.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+ # Virtualbox
+ mgr.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ mgr.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ mgr.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+
+ # Parallels
+ mgr.vm.provider "parallels" do |prl|
+ prl.name = "ceph-mgr#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ mgr.vm.provider :linode do |provider|
+ provider.label = mgr.vm.hostname
+ end
+ end
+ end
+
+ (0..CLIENTS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
+ client.vm.box = CLIENT_BOX
+ client.vm.hostname = "#{LABEL_PREFIX}client#{i}"
+ if ASSIGN_STATIC_IP
+ client.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+ # Virtualbox
+ client.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ client.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ client.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+
+ # Parallels
+ client.vm.provider "parallels" do |prl|
+ prl.name = "ceph-client#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ client.vm.provider :linode do |provider|
+ provider.label = client.vm.hostname
+ end
+ end
+ end
+
+ (0..NRGWS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
+ rgw.vm.hostname = "#{LABEL_PREFIX}rgw#{i}"
+ if ASSIGN_STATIC_IP
+ rgw.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+
+ # Virtualbox
+ rgw.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ rgw.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ rgw.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+
+ # Parallels
+ rgw.vm.provider "parallels" do |prl|
+ prl.name = "ceph-rgw#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ rgw.vm.provider :linode do |provider|
+ provider.label = rgw.vm.hostname
+ end
+ end
+ end
+
+ (0..NNFSS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}nfs#{i}" do |nfs|
+ nfs.vm.hostname = "#{LABEL_PREFIX}nfs#{i}"
+ if ASSIGN_STATIC_IP
+ nfs.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+
+ # Virtualbox
+ nfs.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ nfs.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ nfs.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+
+ # Parallels
+ nfs.vm.provider "parallels" do |prl|
+ prl.name = "ceph-nfs#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ nfs.vm.provider :linode do |provider|
+ provider.label = nfs.vm.hostname
+ end
+ end
+ end
+
+ (0..NMDSS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
+ mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}"
+ if ASSIGN_STATIC_IP
+ mds.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+ # Virtualbox
+ mds.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ mds.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ mds.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+ # Parallels
+ mds.vm.provider "parallels" do |prl|
+ prl.name = "ceph-mds#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ mds.vm.provider :linode do |provider|
+ provider.label = mds.vm.hostname
+ end
+ end
+ end
+
+ (0..NRBD_MIRRORS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}rbd-mirror#{i}" do |rbd_mirror|
+ rbd_mirror.vm.hostname = "#{LABEL_PREFIX}rbd-mirror#{i}"
+ if ASSIGN_STATIC_IP
+ rbd_mirror.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+ # Virtualbox
+ rbd_mirror.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ rbd_mirror.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ rbd_mirror.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+ # Parallels
+ rbd_mirror.vm.provider "parallels" do |prl|
+ prl.name = "ceph-rbd-mirror#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ rbd_mirror.vm.provider :linode do |provider|
+ provider.label = rbd_mirror.vm.hostname
+ end
+ end
+ end
+
+ (0..NISCSI_GWS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}iscsi-gw#{i}" do |iscsi_gw|
+ iscsi_gw.vm.hostname = "#{LABEL_PREFIX}iscsi-gw#{i}"
+ if ASSIGN_STATIC_IP
+ iscsi_gw.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+ # Virtualbox
+ iscsi_gw.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ iscsi_gw.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ iscsi_gw.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+ # Parallels
+ iscsi_gw.vm.provider "parallels" do |prl|
+ prl.name = "iscsi-gw#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ iscsi_gw.vm.provider :linode do |provider|
+ provider.label = iscsi_gw.vm.hostname
+ end
+ end
+ end
+
+ (0..NOSDS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
+ osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}"
+ if ASSIGN_STATIC_IP
+ osd.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ osd.vm.network :private_network,
+ ip: "#{CLUSTER_SUBNET}.#{$last_ip_cluster_digit+=1}"
+ end
+ # Virtualbox
+ osd.vm.provider :virtualbox do |vb|
+ # Create our own controller for consistency and to remove VM dependency
+ unless File.exist?("disk-#{i}-0.vdi")
+ # Adding the OSD controller;
+ # once the first disk exists we assume this has already been done
+ vb.customize ['storagectl', :id,
+ '--name', 'OSD Controller',
+ '--add', 'scsi']
+ end
+
+ (0..2).each do |d|
+ vb.customize ['createhd',
+ '--filename', "disk-#{i}-#{d}",
+ '--size', '11000'] unless File.exist?("disk-#{i}-#{d}.vdi")
+ vb.customize ['storageattach', :id,
+ '--storagectl', 'OSD Controller',
+ '--port', 3 + d,
+ '--device', 0,
+ '--type', 'hdd',
+ '--medium', "disk-#{i}-#{d}.vdi"]
+ end
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ osd.vm.provider :vmware_fusion do |v|
+ (0..1).each do |d|
+ v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
+ v.vmx["scsi0:#{d + 1}.fileName"] =
+ create_vmdk("disk-#{i}-#{d}", '11000MB')
+ end
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ driverletters = ('a'..'z').to_a
+ osd.vm.provider :libvirt do |lv|
+ # always make /dev/sd{a/b/c} so that CI can ensure that
+ # virtualbox and libvirt will have the same devices to use for OSDs
+ (0..2).each do |d|
+ lv.storage :file, :device => "hd#{driverletters[d]}", :size => '50G', :bus => "ide"
+ end
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ end
+
+ # Parallels
+ osd.vm.provider "parallels" do |prl|
+ prl.name = "ceph-osd#{i}"
+ prl.memory = "#{MEMORY}"
+ (0..1).each do |d|
+ prl.customize ["set", :id,
+ "--device-add",
+ "hdd",
+ "--iface",
+ "sata"]
+ end
+ end
+
+ osd.vm.provider :linode do |provider|
+ provider.label = osd.vm.hostname
+ end
+
+ # Run the provisioner after the last machine comes up
+ osd.vm.provision 'ansible', &ansible_provision if i == (NOSDS - 1)
+ end
+ end
+end
--- /dev/null
+# Comments inside this file must be set BEFORE the option.
+# NOT after the option, otherwise the comment will be interpreted as a value to that option.
+
+[defaults]
+ansible_managed = Please do not change this file directly since it is managed by Ansible and will be overwritten
+library = ./library
+module_utils = ./module_utils
+action_plugins = plugins/actions
+callback_plugins = plugins/callback
+filter_plugins = plugins/filter
+roles_path = ./roles
+# Be sure the user running Ansible has permissions on the logfile
+log_path = $HOME/ansible/ansible.log
+
+forks = 20
+host_key_checking = False
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = $HOME/ansible/facts
+fact_caching_timeout = 7200
+nocows = 1
+callback_whitelist = profile_tasks
+stdout_callback = yaml
+force_valid_group_names = ignore
+inject_facts_as_vars = False
+
+# Disable them in the context of https://review.openstack.org/#/c/469644
+retry_files_enabled = False
+
+# This is the default SSH timeout to use on connection attempts
+# CI slaves are slow so by setting a higher value we can avoid the following error:
+# Timeout (12s) waiting for privilege escalation prompt:
+timeout = 60
+
+[ssh_connection]
+# see: https://github.com/ansible/ansible/issues/11536
+control_path = %(directory)s/%%h-%%r-%%p
+ssh_args = -o ControlMaster=auto -o ControlPersist=600s
+pipelining = True
+
+# Option to retry failed ssh executions if the failure is encountered in ssh itself
+retries = 10
--- /dev/null
+%global commit @COMMIT@
+%global shortcommit %(c=%{commit}; echo ${c:0:7})
+
+Name: ceph-ansible
+Version: @VERSION@
+Release: @RELEASE@%{?dist}
+Summary: Ansible playbooks for Ceph
+# Some files have been copied from Ansible (GPLv3+). For example:
+# plugins/actions/config_template.py
+# roles/ceph-common/plugins/actions/config_template.py
+License: ASL 2.0 and GPLv3+
+URL: https://github.com/ceph/ceph-ansible
+Source0: %{name}-%{version}-%{shortcommit}.tar.gz
+Obsoletes: ceph-iscsi-ansible <= 1.5
+
+BuildArch: noarch
+
+BuildRequires: ansible >= 2.9
+Requires: ansible >= 2.9
+
+%if 0%{?rhel} == 7
+BuildRequires: python2-devel
+Requires: python2-netaddr
+%else
+BuildRequires: python3-devel
+Requires: python3-netaddr
+%endif
+
+%description
+Ansible playbooks for Ceph
+
+%prep
+%autosetup -p1
+
+%build
+
+%install
+mkdir -p %{buildroot}%{_datarootdir}/ceph-ansible
+
+for f in ansible.cfg *.yml *.sample group_vars roles library module_utils plugins infrastructure-playbooks; do
+ cp -a $f %{buildroot}%{_datarootdir}/ceph-ansible
+done
+
+pushd %{buildroot}%{_datarootdir}/ceph-ansible
+ # These untested playbooks are too unstable for users.
+ rm -r infrastructure-playbooks/untested-by-ci
+ %if ! 0%{?fedora} && ! 0%{?centos}
+ # remove ability to install ceph community version
+ rm roles/ceph-common/tasks/installs/redhat_{community,dev}_repository.yml
+ # Ship only the Red Hat Ceph Storage config (overwrite upstream settings)
+ cp group_vars/rhcs.yml.sample group_vars/all.yml.sample
+ %endif
+popd
+
+%check
+# Borrowed from upstream's .travis.yml:
+ansible-playbook -i dummy-ansible-hosts test.yml --syntax-check
+
+%files
+%doc README.rst
+%license LICENSE
+%{_datarootdir}/ceph-ansible
+
+%changelog
--- /dev/null
+#!/usr/bin/env bash
+set -e
+shopt -s extglob # enable extended pattern matching features
+
+
+#############
+# VARIABLES #
+#############
+
+stable_branch=$1
+commit=$2
+bkp_branch_name=$3
+bkp_branch_name_prefix=bkp
+bkp_branch=$bkp_branch_name-$bkp_branch_name_prefix-$stable_branch
+
+
+#############
+# FUNCTIONS #
+#############
+
+verify_commit () {
+ for com in ${commit//,/ }; do
+ if [[ $(git cat-file -t "$com" 2>/dev/null) != commit ]]; then
+ echo "$com does not exist in your tree"
+ echo "Run 'git fetch origin master && git pull origin master'"
+ exit 1
+ fi
+ done
+}
+
+git_status () {
+ if [[ $(git status --porcelain | wc -l) -gt 0 ]]; then
+ echo "It looks like you have not committed changes:"
+ echo ""
+ git status --short
+ echo ""
+ echo ""
+ echo "Press ENTER to continue or Ctrl+c to break."
+ read -r
+ fi
+}
+
+checkout () {
+ git checkout --no-track -b "$bkp_branch" origin/"$stable_branch"
+}
+
+cherry_pick () {
+ local x
+ for com in ${commit//,/ }; do
+ x="$x $com"
+ done
+ # Trim the first white space and use an array
+ # Reference: https://github.com/koalaman/shellcheck/wiki/SC2086#exceptions
+ x=(${x##*( )})
+ git cherry-pick -x -s "${x[@]}"
+}
+
+push () {
+ git push origin "$bkp_branch"
+}
+
+create_pr () {
+ hub pull-request -h ceph/ceph-ansible:"$bkp_branch" -b "$stable_branch" -F -
+}
+
+cleanup () {
+ echo "Moving back to previous branch"
+ git checkout -
+ git branch -D "$bkp_branch"
+}
+
+test_args () {
+ if [ $# -lt 3 ]; then
+ echo "Please run the script like this: ./contrib/backport_to_stable_branch.sh STABLE_BRANCH_NAME COMMIT_SHA1 BACKPORT_BRANCH_NAME"
+ echo "We accept multiple commits as soon as they are commas-separated."
+ echo "e.g: ./contrib/backport_to_stable_branch.sh stable-2.2 6892670d317698771be7e96ce9032bc27d3fd1e5,8756c553cc8c213fc4996fc5202c7b687eb645a3 my-work"
+ exit 1
+ fi
+}
+
+
+########
+# MAIN #
+########
+test_args "$@"
+git_status
+verify_commit
+checkout
+cherry_pick
+push
+create_pr <<MSG
+${4} Backport of ${3} in $stable_branch
+
+Backport of #${3} in $stable_branch
+MSG
+cleanup
--- /dev/null
+#!/bin/bash
+set -xe
+
+# VARIABLES
+BASEDIR=$(dirname "$0")
+LOCAL_BRANCH=$(cd $BASEDIR && git rev-parse --abbrev-ref HEAD)
+ROLES="ceph-common ceph-mon ceph-osd ceph-mds ceph-rgw ceph-fetch-keys ceph-rbd-mirror ceph-client ceph-container-common ceph-mgr ceph-defaults ceph-config"
+
+
+# FUNCTIONS
+function goto_basedir {
+ TOP_LEVEL=$(cd $BASEDIR && git rev-parse --show-toplevel)
+ if [[ "$(pwd)" != "$TOP_LEVEL" ]]; then
+ pushd "$TOP_LEVEL"
+ fi
+}
+
+function check_existing_remote {
+ if ! git remote show "$1" &> /dev/null; then
+ git remote add "$1" git@github.com:/ceph/ansible-"$1".git
+ fi
+}
+
+function pull_origin {
+ git pull origin master
+}
+
+function reset_hard_origin {
+ # let's bring everything back to normal
+ git checkout "$LOCAL_BRANCH"
+ git fetch origin --prune
+ git fetch --tags
+ git reset --hard origin/master
+}
+
+function check_git_status {
+ if [[ $(git status --porcelain | wc -l) -gt 0 ]]; then
+ echo "It looks like the following changes haven't been committed yet"
+ echo ""
+ git status --short
+ echo ""
+ echo ""
+ echo "Do you really want to continue?"
+ echo "Press ENTER to continue or CTRL C to break"
+ read -r
+ fi
+}
+
+function compare_tags {
+ # compare local tags (from https://github.com/ceph/ceph-ansible/) with remote tags (from https://github.com/ceph/ansible-ceph-$ROLE)
+ local tag_local
+ local tag_remote
+ for tag_local in $(git tag | grep -oE '^v[2-9].[0-9]*.[0-9]*$' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n); do
+ tags_array+=("$tag_local")
+ done
+ for tag_remote in $(git ls-remote --tags "$1" | grep -oE 'v[2-9].[0-9]*.[0-9]*$' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n); do
+ remote_tags_array+=("$tag_remote")
+ done
+
+ for i in "${tags_array[@]}"; do
+ skip=
+ for j in "${remote_tags_array[@]}"; do
+ [[ "$i" == "$j" ]] && { skip=1; break; }
+ done
+ [[ -n $skip ]] || tag_to_apply+=("$i")
+ done
+}
+
+# MAIN
+goto_basedir
+check_git_status
+trap reset_hard_origin EXIT
+trap reset_hard_origin ERR
+pull_origin
+
+for ROLE in $ROLES; do
+ # For readability we use 2 variables with the same content
+ # so we always make sure we 'push' to a remote and 'filter' a role
+ REMOTE=$ROLE
+ check_existing_remote "$REMOTE"
+ reset_hard_origin
+ # First we filter branches by rewriting master with the content of roles/$ROLE
+ # this gives us a new commit history
+ for BRANCH in $(git branch --list --remotes "origin/stable-*" "origin/master" "origin/ansible-1.9" | cut -d '/' -f2); do
+ git checkout -B "$BRANCH" origin/"$BRANCH"
+ # use || true to avoid exiting in case of 'Found nothing to rewrite'
+ git filter-branch -f --prune-empty --subdirectory-filter roles/"$ROLE" || true
+ git push -f "$REMOTE" "$BRANCH"
+ done
+ reset_hard_origin
+ # then we filter tags starting from version 2.0 and push them
+ compare_tags "$ROLE"
+ if [[ ${#tag_to_apply[@]} == 0 ]]; then
+ echo "No new tag to push."
+ continue
+ fi
+ for TAG in "${tag_to_apply[@]}"; do
+ # use || true to avoid exiting in case of 'Found nothing to rewrite'
+ git filter-branch -f --prune-empty --subdirectory-filter roles/"$ROLE" "$TAG" || true
+ git push -f "$REMOTE" "$TAG"
+ reset_hard_origin
+ done
+done
+trap - EXIT ERR
+popd &> /dev/null
--- /dev/null
+#Package lines can be commented out with '#'
+#
+#boost-atomic
+#boost-chrono
+#boost-date-time
+#boost-iostreams
+#boost-program
+#boost-random
+#boost-regex
+#boost-system
+#boost-thread
+#bzip2-libs
+#cyrus-sasl-lib
+#expat
+#fcgi
+#fuse-libs
+#glibc
+#keyutils-libs
+#leveldb
+#libaio
+#libatomic_ops
+#libattr
+#libblkid
+#libcap
+#libcom_err
+#libcurl
+#libgcc
+#libicu
+#libidn
+#libnghttp2
+#libpsl
+#libselinux
+#libssh2
+#libstdc++
+#libunistring
+#nss-softokn-freebl
+#openldap
+#openssl-libs
+#pcre
+#python-nose
+#python-sphinx
+#snappy
+#systemd-libs
+#zlib
--- /dev/null
+#!/bin/bash -e
+#
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Daniel Lin <danielin@umich.edu>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+
+if test -f /etc/redhat-release ; then
+ PACKAGE_INSTALLER=yum
+elif type apt-get > /dev/null 2>&1 ; then
+ PACKAGE_INSTALLER=apt-get
+else
+ echo "ERROR: Package Installer could not be determined"
+ exit 1
+fi
+
+while read p; do
+ if [[ $p =~ ^#.* ]] ; then
+ continue
+ fi
+ $PACKAGE_INSTALLER install $p -y
+done < $1
--- /dev/null
+#!/bin/bash
+
+create_snapshots() {
+ local pattern=$1
+ for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do
+ sudo virsh shutdown "${vm}"
+ wait_for_shutoff "${vm}"
+ sudo virsh snapshot-create "${vm}"
+ sudo virsh start "${vm}"
+ done
+}
+
+delete_snapshots() {
+ local pattern=$1
+ for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do
+ for snapshot in $(sudo virsh snapshot-list "${vm}" --name); do
+ echo "deleting snapshot ${snapshot} (vm: ${vm})"
+ sudo virsh snapshot-delete "${vm}" "${snapshot}"
+ done
+ done
+}
+
+revert_snapshots() {
+ local pattern=$1
+ for vm in $(sudo virsh list --all | awk "/${pattern}/{print \$2}"); do
+ echo "restoring last snapshot for ${vm}"
+ sudo virsh snapshot-revert "${vm}" --current
+ sudo virsh start "${vm}"
+ done
+}
+
+wait_for_shutoff() {
+ local vm=$1
+ local retries=60
+ local delay=2
+
+ until test "${retries}" -eq 0
+ do
+ echo "waiting for ${vm} to be shut off... #${retries}"
+ sleep "${delay}"
+ let "retries=$retries-1"
+ local current_state=$(sudo virsh domstate "${vm}")
+ test "${current_state}" == "shut off" && return
+ done
+ echo "could not shut off ${vm}"
+ exit 1
+}
+
+while :; do
+ case $1 in
+ -d|--delete)
+ delete_snapshots "$2"
+ exit
+ ;;
+ -i|--interactive)
+ INTERACTIVE=TRUE
+ ;;
+ -s|--snapshot)
+ create_snapshots "$2"
+ ;;
+ -r|--revert)
+ revert_snapshots "$2"
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ break
+ esac
+
+ shift
+done
--- /dev/null
+---
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.0
+cluster_subnet: 192.168.1
+
+# MEMORY
+memory: 1024
+
+disks: [ '/dev/sda', '/dev/sdb' ]
+
+eth: 'enp0s8'
+vagrant_box: centos/atomic-host
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+vagrant_sync_dir: /home/vagrant/sync
+
+skip_tags: 'with_pkg'
--- /dev/null
+---
+
+vagrant_box: 'linode'
+vagrant_box_url: 'https://github.com/displague/vagrant-linode/raw/master/box/linode.box'
+
+# Set a label prefix for the machines in this cluster. (This is useful and necessary when running multiple clusters concurrently.)
+#label_prefix: 'foo'
+
+ssh_username: 'vagrant'
+ssh_private_key_path: '~/.ssh/id_rsa'
+
+cloud_distribution: 'CentOS 7'
+cloud_datacenter: 'newark'
+
+# Memory for each Linode instance, this determines price! See Linode plans.
+memory: 2048
+
+# The private network on Linode, you probably don't want to change this.
+public_subnet: 192.168.0
+cluster_subnet: 192.168.0
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 1
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+# vagrant_sync_dir: /home/vagrant/sync
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+
+# SUBNET TO USE FOR THE VMS
+# Use whatever private subnet your Openstack VMs are given
+public_subnet: 172.17.72
+cluster_subnet: 172.17.72
+
+# For Openstack VMs, the disk will depend on what you are allocated
+disks: [ '/dev/vdb' ]
+
+# For Openstack VMs, the lan is usually eth0
+eth: 'eth0'
+
+# For Openstack VMs, choose the following box instead
+vagrant_box: 'openstack'
+
+# When using Atomic Hosts (RHEL or CentOS), uncomment the line below to skip package installation
+#skip_tags: 'with_pkg'
+
+# Set a label prefix for the machines in this cluster to differentiate
+# between different concurrent clusters e.g. your OpenStack username
+label_prefix: 'your-openstack-username'
+
+# For deploying on OpenStack VMs uncomment these vars and assign values.
+# You can use env vars for the values if it makes sense.
+#ssh_username :
+#ssh_private_key_path :
+#os_openstack_auth_url :
+#os_username :
+#os_password :
+#os_tenant_name :
+#os_region :
+#os_flavor :
+#os_image :
+#os_keypair_name :
+#os_networks :
+#os_floating_ip_pool :
--- /dev/null
+---
+- hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ - "{{ monitoring_group_name|default('monitoring') }}"
+ gather_facts: false
+ become: true
+ pre_tasks:
+ - name: set ceph node exporter install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_node_exporter:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-container-engine
+ - import_role:
+ name: ceph-container-common
+ tasks_from: registry
+ when:
+ - not containerized_deployment | bool
+ - ceph_docker_registry_auth | bool
+ - import_role:
+ name: ceph-node-exporter
+
+ post_tasks:
+ - name: set ceph node exporter install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_node_exporter:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: "{{ monitoring_group_name }}"
+ gather_facts: false
+ become: true
+ pre_tasks:
+ - name: set ceph grafana install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_grafana:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tasks_from: grafana
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-prometheus
+ - import_role:
+ name: ceph-grafana
+
+ post_tasks:
+ - name: set ceph grafana install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_grafana:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+# using groups[] here otherwise it can't fallback to the mon if there's no mgr group.
+# adding an additional | default(omit) in case where no monitors are present (external ceph cluster)
+- hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}"
+ gather_facts: false
+ become: true
+ pre_tasks:
+ - name: set ceph dashboard install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_dashboard:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tasks_from: grafana
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-dashboard
+
+ post_tasks:
+ - name: set ceph dashboard install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_dashboard:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
--- /dev/null
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXPROJ = ceph-ansible
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# ceph-ansible documentation build configuration file, created by
+# sphinx-quickstart on Wed Apr 5 11:55:38 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'glossary'
+
+# General information about the project.
+project = u'ceph-ansible'
+copyright = u'2017-2018, Ceph team and individual contributors'
+author = u'Ceph team and individual contributors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u''
+# The full version, including alpha/beta/rc tags.
+release = u''
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = []
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+
+# -- Options for HTMLHelp output ------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'ceph-ansibledoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'ceph-ansible.tex', u'ceph-ansible Documentation',
+ u'Ceph team and individual contributors', 'manual'),
+]
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'ceph-ansible', u'ceph-ansible Documentation',
+ [author], 1)
+]
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'ceph-ansible', u'ceph-ansible Documentation',
+ author, 'ceph-ansible', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+master_doc = 'index'
\ No newline at end of file
--- /dev/null
+Adding/removing OSD(s) after a cluster is deployed is a common operation that should be straightforward to achieve.
+
+
+Adding osd(s)
+-------------
+
+Adding new OSD(s) on an existing host or adding a new OSD node can be achieved by running the main playbook with the ``--limit`` ansible option.
+You need to update your host_vars/group_vars with the new hardware and/or add the new OSD nodes to the inventory host file.
+
+The command would look like the following:
+
+``ansible-playbook -vv -i <your-inventory> site-container.yml --limit <node>``
+
+example:
+
+.. code-block:: shell
+
+ $ cat hosts
+ [mons]
+ mon-node-1
+ mon-node-2
+ mon-node-3
+
+ [mgrs]
+ mon-node-1
+ mon-node-2
+ mon-node-3
+
+ [osds]
+ osd-node-1
+ osd-node-2
+ osd-node-3
+ osd-node-99
+
+ $ ansible-playbook -vv -i hosts site-container.yml --limit osd-node-99
+
+
+Shrinking osd(s)
+----------------
+
+Shrinking OSDs can be done by using the ``shrink-osd.yml`` playbook provided in the ``infrastructure-playbooks`` directory.
+
+The variable ``osd_to_kill`` is a comma separated list of OSD IDs which must be passed to the playbook (passing it as an extra var is the easiest way).
+
+The playbook will shrink all OSDs passed in ``osd_to_kill`` serially.
+
+example:
+
+.. code-block:: shell
+
+ $ ansible-playbook -vv -i hosts infrastructure-playbooks/shrink-osd.yml -e osd_to_kill=1,2,3
--- /dev/null
+Purging the cluster
+-------------------
+
+ceph-ansible provides two playbooks in ``infrastructure-playbooks`` for purging a Ceph cluster: ``purge-cluster.yml`` and ``purge-container-cluster.yml``.
+
+The names are pretty self-explanatory, ``purge-cluster.yml`` is intended to purge a non-containerized cluster whereas ``purge-container-cluster.yml`` is to purge a containerized cluster.
+
+example:
+
+.. code-block:: shell
+
+ $ ansible-playbook -vv -i hosts infrastructure-playbooks/purge-container-cluster.yml
+
+.. note::
+ These playbooks aren't intended to be run with the ``--limit`` option.
\ No newline at end of file
--- /dev/null
+Upgrading the ceph cluster
+--------------------------
+
+ceph-ansible provides a playbook in ``infrastructure-playbooks`` for upgrading a Ceph cluster: ``rolling_update.yml``.
+
+This playbook can be used for both minor upgrades (X.Y to X.Z) and major upgrades (X to Y).
+
+Before running a major upgrade you first need to update ceph-ansible to the corresponding version.
+
+example:
+
+.. code-block:: shell
+
+ $ ansible-playbook -vv -i hosts infrastructure-playbooks/rolling_update.yml
+
+.. note::
+ This playbook isn't intended to be run with the ``--limit`` ansible option.
--- /dev/null
+Contribution Guidelines
+=======================
+
+This repository centralises all the Ansible roles. The roles are all part of Ansible Galaxy.
+
+We love contributions and we love giving visibility to our contributors; this is why all **commits must be signed off**.
+
+Mailing list
+------------
+
+Please register for the mailing list at http://lists.ceph.com/listinfo.cgi/ceph-ansible-ceph.com.
+
+IRC
+---
+
+Feel free to join us in the channel ``#ceph-ansible`` of the OFTC servers (https://www.oftc.net).
+
+GitHub
+------
+
+The main GitHub account for the project is at https://github.com/ceph/ceph-ansible/.
+
+Submit a patch
+--------------
+
+To start contributing just do:
+
+.. code-block:: console
+
+ $ git checkout -b my-working-branch
+ $ # do your changes #
+ $ git add -p
+
+If your change impacts a variable file in a role such as ``roles/ceph-common/defaults/main.yml``, you need to generate a ``group_vars`` file:
+
+.. code-block:: console
+
+ $ ./generate_group_vars_sample.sh
+
+You are finally ready to push your changes on GitHub:
+
+.. code-block:: console
+
+ $ git commit -s
+ $ git push origin my-working-branch
+
+Worked on a change and you don't want to resend a commit for a syntax fix?
+
+.. code-block:: console
+
+ $ # do your syntax change #
+ $ git commit --amend
+ $ git push -f origin my-working-branch
+
+Pull Request Testing
+--------------------
+
+Pull request testing is handled by Jenkins. All tests must pass before your pull request can be merged.
+
+All of the tests that run are listed in the GitHub UI along with their current status.
+
+If a test fails and you'd like to rerun it, comment on your pull request in the following format:
+
+.. code-block:: none
+
+ jenkins test $scenario_name
+
+For example:
+
+.. code-block:: none
+
+ jenkins test centos-non_container-all_daemons
+
+Backporting changes
+-------------------
+
+If a change should be backported to a ``stable-*`` Git branch:
+
+- Mark your pull request with the GitHub label "Backport" so we don't lose track of it.
+- Fetch the latest updates into your clone: ``git fetch``
+- Determine the latest available stable branch:
+ ``git branch -r --list "origin/stable-[0-9].[0-9]" | sort -r | sed 1q``
+- Create a new local branch for your pull request, based on the stable branch:
+ ``git checkout --no-track -b my-backported-change origin/stable-5.0``
+- Cherry-pick your change: ``git cherry-pick -x (your-sha1)``
+- Create a new pull request against the ``stable-5.0`` branch.
+- Ensure that your pull request's title has the prefix "backport:", so it's clear
+ to reviewers what this is about.
+- Add a comment in your backport pull request linking to the original (master) pull request.
+
+All changes to the stable branches should land in master first, so we avoid
+regressions.
+
+Once this is done, one of the project maintainers will tag the tip of the
+stable branch with your change. For example:
+
+.. code-block:: console
+
+ $ git checkout stable-5.0
+ $ git pull --ff-only
+ $ git tag v5.0.12
+ $ git push origin v5.0.12
--- /dev/null
+Glossary
+========
+
+.. toctree::
+ :maxdepth: 3
+ :caption: Contents:
+
+ index
+ testing/glossary
--- /dev/null
+============
+ceph-ansible
+============
+
+Ansible playbooks for Ceph, the distributed filesystem.
+
+
+Installation
+============
+
+GitHub
+------
+
+You can install directly from the source on GitHub by following these steps:
+
+- Clone the repository:
+
+ .. code-block:: console
+
+ $ git clone https://github.com/ceph/ceph-ansible.git
+
+- Next, you must decide which branch of ``ceph-ansible`` you wish to use. There
+ are stable branches to choose from or you could use the master branch:
+
+ .. code-block:: console
+
+ $ git checkout $branch
+
+- Next, use pip and the provided requirements.txt to install Ansible and other
+ needed Python libraries:
+
+ .. code-block:: console
+
+ $ pip install -r requirements.txt
+
+.. _ansible-on-rhel-family:
+
+Ansible on RHEL and CentOS
+--------------------------
+
+You can acquire Ansible on RHEL and CentOS by installing from `Ansible channel <https://access.redhat.com/articles/3174981>`_.
+
+On RHEL:
+
+.. code-block:: console
+
+ $ subscription-manager repos --enable=rhel-7-server-ansible-2-rpms
+
+(CentOS does not use subscription-manager and already has "Extras" enabled by default.)
+
+.. code-block:: console
+
+ $ sudo yum install ansible
+
+Ansible on Ubuntu
+-----------------
+
+You can acquire Ansible on Ubuntu by using the `Ansible PPA <https://launchpad.net/~ansible/+archive/ubuntu/ansible>`_.
+
+.. code-block:: console
+
+ $ sudo add-apt-repository ppa:ansible/ansible
+ $ sudo apt update
+ $ sudo apt install ansible
+
+Ansible collections
+-------------------
+
+In order to install third-party collections that are required for ceph-ansible,
+please run:
+
+.. code-block:: console
+
+ $ ansible-galaxy install -r requirements.yml
+
+
+Releases
+========
+
+The following branches should be used depending on your requirements. The ``stable-*``
+branches have been QE tested and sometimes receive backport fixes throughout their lifecycle.
+The ``master`` branch should be considered experimental and used with caution.
+
+- ``stable-3.0`` Supports Ceph versions ``jewel`` and ``luminous``. This branch requires Ansible version ``2.4``.
+
+- ``stable-3.1`` Supports Ceph versions ``luminous`` and ``mimic``. This branch requires Ansible version ``2.4``.
+
+- ``stable-3.2`` Supports Ceph versions ``luminous`` and ``mimic``. This branch requires Ansible version ``2.6``.
+
+- ``stable-4.0`` Supports Ceph version ``nautilus``. This branch requires Ansible version ``2.9``.
+
+- ``stable-5.0`` Supports Ceph version ``octopus``. This branch requires Ansible version ``2.9``.
+
+- ``stable-6.0`` Supports Ceph version ``pacific``. This branch requires Ansible version ``2.10``.
+
+- ``master`` Supports the master branch of Ceph. This branch requires Ansible version ``2.10``.
+
+.. NOTE:: ``stable-3.0`` and ``stable-3.1`` branches of ceph-ansible are deprecated and no longer maintained.
+
+Configuration and Usage
+=======================
+
+This project assumes you have a basic knowledge of how Ansible works and have already prepared your hosts for
+configuration by Ansible.
+
+After you've cloned the ``ceph-ansible`` repository, selected your branch and installed Ansible then you'll need to create
+your inventory file, playbook and configuration for your Ceph cluster.
+
+Inventory
+---------
+
+The Ansible inventory file defines the hosts in your cluster and what roles each host plays in your Ceph cluster. The default
+location for an inventory file is ``/etc/ansible/hosts`` but this file can be placed anywhere and used with the ``-i`` flag of
+``ansible-playbook``.
+
+An example inventory file would look like:
+
+.. code-block:: ini
+
+ [mons]
+ mon1
+ mon2
+ mon3
+
+ [osds]
+ osd1
+ osd2
+ osd3
+
+.. note::
+
+ For more information on Ansible inventories please refer to the Ansible documentation: http://docs.ansible.com/ansible/latest/intro_inventory.html
+
+Playbook
+--------
+
+You must have a playbook to pass to the ``ansible-playbook`` command when deploying your cluster. There is a sample playbook at the root of the ``ceph-ansible``
+project called ``site.yml.sample``. This playbook should work fine for most usages, but by default it includes every daemon group, which might not be
+appropriate for your cluster setup. Perform the following steps to prepare your playbook:
+
+- Rename the sample playbook: ``mv site.yml.sample site.yml``
+
+- Modify the playbook as necessary for the requirements of your cluster
+
+.. note::
+
+ It's important the playbook you use is placed at the root of the ``ceph-ansible`` project. This is how Ansible will be able to find the roles that
+ ``ceph-ansible`` provides.
+
+Configuration Validation
+------------------------
+
+The ``ceph-ansible`` project provides config validation through the ``ceph-validate`` role. If you are using one of the provided playbooks this role will
+be run early in the deployment to ensure you've given ``ceph-ansible`` the correct config. This check only makes sure that you've provided the
+proper config settings for your cluster, not that the values in them will produce a healthy cluster. For example, if you give an incorrect address for
+``monitor_address`` then the mon will still fail to join the cluster.
+
+An example of a validation failure might look like:
+
+.. code-block:: console
+
+ TASK [ceph-validate : validate provided configuration] *************************
+ task path: /Users/andrewschoen/dev/ceph-ansible/roles/ceph-validate/tasks/main.yml:3
+ Wednesday 02 May 2018 13:48:16 -0500 (0:00:06.984) 0:00:18.803 *********
+ [ERROR]: [mon0] Validation failed for variable: osd_objectstore
+
+ [ERROR]: [mon0] Given value for osd_objectstore: foo
+
+ [ERROR]: [mon0] Reason: osd_objectstore must be either 'bluestore' or 'filestore'
+
+ fatal: [mon0]: FAILED! => {
+ "changed": false
+ }
+
+Supported Validation
+^^^^^^^^^^^^^^^^^^^^
+
+The ``ceph-validate`` role currently supports validation of the proper config for the following
+osd scenarios:
+
+- ``collocated``
+- ``non-collocated``
+- ``lvm``
+
+The following install options are also validated by the ``ceph-validate`` role:
+
+- ``ceph_origin`` set to ``distro``
+- ``ceph_origin`` set to ``repository``
+- ``ceph_origin`` set to ``local``
+- ``ceph_repository`` set to ``rhcs``
+- ``ceph_repository`` set to ``dev``
+- ``ceph_repository`` set to ``community``
+
+
+Installation methods
+--------------------
+
+Ceph can be installed through several methods.
+
+.. toctree::
+ :maxdepth: 1
+
+ installation/methods
+
+Configuration
+-------------
+
+The configuration for your Ceph cluster will be set by the use of ansible variables that ``ceph-ansible`` provides. All of these options and their default
+values are defined in the ``group_vars/`` directory at the root of the ``ceph-ansible`` project. Ansible will use configuration in a ``group_vars/`` directory
+that is relative to your inventory file or your playbook. Inside of the ``group_vars/`` directory there are many sample Ansible configuration files that relate
+to each of the Ceph daemon groups by their filename. For example, the ``osds.yml.sample`` contains all the default configuration for the OSD daemons. The ``all.yml.sample``
+file is a special ``group_vars`` file that applies to all hosts in your cluster.
+
+.. note::
+
+ For more information on setting group or host specific configuration refer to the Ansible documentation: http://docs.ansible.com/ansible/latest/intro_inventory.html#splitting-out-host-and-group-specific-data
+
+At the most basic level you must tell ``ceph-ansible`` what version of Ceph you wish to install, the method of installation, your clusters network settings and
+how you want your OSDs configured. To begin your configuration rename each file in ``group_vars/`` you wish to use so that it does not include the ``.sample``
+at the end of the filename, uncomment the options you wish to change and provide your own value.
+
+An example configuration that deploys the upstream ``octopus`` version of Ceph with the lvm batch method would look like this in ``group_vars/all.yml``:
+
+.. code-block:: yaml
+
+ ceph_origin: repository
+ ceph_repository: community
+ public_network: "192.168.3.0/24"
+ cluster_network: "192.168.4.0/24"
+ monitor_interface: eth1
+ devices:
+ - '/dev/sda'
+ - '/dev/sdb'
+
+The following config options are required to be changed on all installations but there could be other required options depending on your OSD scenario
+selection or other aspects of your cluster.
+
+- ``ceph_origin``
+- ``public_network``
+- ``monitor_interface`` or ``monitor_address``
+
+
+When deploying RGW instance(s) you are required to set the ``radosgw_interface`` or ``radosgw_address`` config option.
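+
+A minimal sketch in ``group_vars/all.yml`` (the interface name is illustrative and depends on your hosts):
+
+.. code-block:: yaml
+
+ radosgw_interface: eth1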
+
+``ceph.conf`` Configuration File
+---------------------------------
+
+The supported method for defining your ``ceph.conf`` is to use the ``ceph_conf_overrides`` variable. This allows you to specify configuration options using
+an INI format. This variable can be used to override sections already defined in ``ceph.conf`` (see: ``roles/ceph-config/templates/ceph.conf.j2``) or to provide
+new configuration options.
+
+The following sections in ``ceph.conf`` are supported:
+
+* ``[global]``
+* ``[mon]``
+* ``[osd]``
+* ``[mds]``
+* ``[client.rgw.{instance_name}]``
+
+An example:
+
+.. code-block:: yaml
+
+ ceph_conf_overrides:
+ global:
+ foo: 1234
+ bar: 5678
+ osd:
+ osd_mkfs_type: ext4
+
+.. note::
+
+ We will no longer accept pull requests that modify the ``ceph.conf`` template unless it helps the deployment. For simple configuration tweaks
+ please use the ``ceph_conf_overrides`` variable.
+
+Full documentation for configuring each of the Ceph daemon types is in the following sections.
+
+OSD Configuration
+-----------------
+
+OSD configuration used to be set by selecting an OSD scenario and providing the configuration needed for
+that scenario. As of nautilus in stable-4.0, the only scenario available is ``lvm``.
+
+.. toctree::
+ :maxdepth: 1
+
+ osds/scenarios
+
+Day-2 Operations
+----------------
+
+ceph-ansible provides a set of playbooks in the ``infrastructure-playbooks`` directory in order to perform some basic day-2 operations.
+
+.. toctree::
+ :maxdepth: 1
+
+ day-2/osds
+ day-2/purge
+ day-2/upgrade
+
+RBD Mirroring
+-------------
+
+Ceph-ansible provides the ``ceph-rbd-mirror`` role, which can set up RBD mirror replication.
+
+.. toctree::
+ :maxdepth: 1
+
+ rbdmirror/index
+
+Contribution
+============
+
+See the following section for guidelines on how to contribute to ``ceph-ansible``.
+
+.. toctree::
+ :maxdepth: 1
+
+ dev/index
+
+Testing
+=======
+
+Documentation for writing functional testing scenarios for ``ceph-ansible``.
+
+* :doc:`Testing with ceph-ansible <testing/index>`
+* :doc:`Glossary <testing/glossary>`
+
+Demos
+=====
+
+Vagrant Demo
+------------
+
+Deployment from scratch on vagrant machines: https://youtu.be/E8-96NamLDo
+
+Bare metal demo
+---------------
+
+Deployment from scratch on bare metal machines: https://youtu.be/dv_PEp9qAqg
--- /dev/null
+Containerized deployment
+========================
+
+Ceph-ansible supports only Docker and Podman for deploying Ceph in a containerized context.
+
+Configuration and Usage
+-----------------------
+
+To deploy Ceph in containers, you will need to set the ``containerized_deployment`` variable to ``true`` and use the ``site-container.yml.sample`` playbook.
+
+.. code-block:: yaml
+
+ containerized_deployment: true
+
+The ``ceph_origin`` and ``ceph_repository`` variables aren't needed anymore in containerized deployment and are ignored.
+
+.. code-block:: console
+
+ $ ansible-playbook site-container.yml.sample
+
+.. note::
+
+ The infrastructure playbooks work for both containerized and non-containerized deployments.
+
+Custom container image
+----------------------
+
+You can configure your own container registry, image and tag by using the ``ceph_docker_registry``, ``ceph_docker_image`` and ``ceph_docker_image_tag`` variables.
+
+.. code-block:: yaml
+
+ ceph_docker_registry: quay.ceph.io
+ ceph_docker_image: ceph-ci/daemon
+ ceph_docker_image_tag: latest
+
+.. note::
+
+ ``ceph_docker_image`` should have both image namespace and image name concatenated and separated by a slash character.
+
+ ``ceph_docker_image_tag`` should be set to a fixed tag, not to any "latest" tags unless you know what you are doing. Using a "latest" tag
+ might make the playbook restart all the daemons deployed in your cluster since these tags are intended to be updated periodically.
+
+Container registry authentication
+---------------------------------
+
+When using a container registry with authentication, you need to set the ``ceph_docker_registry_auth`` variable to ``true`` and provide the credentials via the
+``ceph_docker_registry_username`` and ``ceph_docker_registry_password`` variables.
+
+.. code-block:: yaml
+
+ ceph_docker_registry_auth: true
+ ceph_docker_registry_username: foo
+ ceph_docker_registry_password: bar
+
+Container registry behind a proxy
+---------------------------------
+
+When using a container registry reachable via an HTTP(S) proxy, you need to set the ``ceph_docker_http_proxy`` and/or ``ceph_docker_https_proxy`` variables. If you need
+to exclude some hosts from the proxy configuration, you can use the ``ceph_docker_no_proxy`` variable.
+
+.. code-block:: yaml
+
+ ceph_docker_http_proxy: http://192.168.42.100:8080
+ ceph_docker_https_proxy: https://192.168.42.100:8080
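+ # optionally exclude some hosts from the proxy; the value below is illustrative
+ ceph_docker_no_proxy: "localhost,127.0.0.1"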
\ No newline at end of file
--- /dev/null
+Installation methods
+====================
+
+ceph-ansible can deploy Ceph either in a non-containerized context (via packages) or in a containerized context using ceph-container images.
+
+.. toctree::
+ :maxdepth: 1
+
+ non-containerized
+ containerized
+
+The difference is that with a containerized deployment the ``rbd`` command is not available on the host, so everything related to Ceph needs to be executed within a container. If some software (e.g. OpenNebula) requires the ``rbd`` command to be accessible directly on the host (non-containerized), you have to install it yourself on those servers outside of containers, or make sure that this software also runs within containers and that it can access ``rbd``.
--- /dev/null
+Non containerized deployment
+============================
+
+The following are all of the available options for installing Ceph through different channels.
+
+We support 3 main installation methods, all managed by the ``ceph_origin`` variable:
+
+- ``repository``: means that you will get Ceph installed through a new repository. Below, choose between ``community``, ``rhcs``, ``dev``, ``uca`` or ``custom``. These options are exposed through the ``ceph_repository`` variable.
+- ``distro``: means that no separate repo file will be added and you will get whatever version of Ceph is included in your Linux distro.
+- ``local``: means that the Ceph binaries will be copied over from the local machine (not well tested, use at your own risk)
+
+Origin: Repository
+------------------
+
+If ``ceph_origin`` is set to ``repository``, you now have the choice between a couple of repositories controlled by the ``ceph_repository`` option:
+
+- ``community``: fetches packages from http://download.ceph.com, the official community Ceph repositories
+- ``rhcs``: means you are a Red Hat customer
+- ``dev``: fetches packages from shaman, a gitbuilder based package system
+- ``uca``: fetches packages from Ubuntu Cloud Archive
+- ``custom``: fetches packages from a specific repository
+
+Community repository
+~~~~~~~~~~~~~~~~~~~~
+
+If ``ceph_repository`` is set to ``community``, packages will be installed by default from http://download.ceph.com; this can be changed by tweaking ``ceph_mirror``.
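+
+A minimal sketch for ``group_vars/all.yml`` (the mirror override is optional):
+
+.. code-block:: yaml
+
+ ceph_origin: repository
+ ceph_repository: community
+ # ceph_mirror: https://download.ceph.com  # point this at a closer mirror if needed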
+
+RHCS repository
+~~~~~~~~~~~~~~~
+
+RHCS is the Red Hat Ceph Storage product from Red Hat, the enterprise version of Ceph.
+If ``ceph_repository`` is set to ``rhcs``, packages will be installed from Red Hat sources.
+
+To choose a specific version of RHCS you can set the ``ceph_rhcs_version`` variable accordingly, e.g: ``ceph_rhcs_version: 2``.
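+
+A sketch of the corresponding settings (the version value follows the example above):
+
+.. code-block:: yaml
+
+ ceph_origin: repository
+ ceph_repository: rhcs
+ ceph_rhcs_version: 2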
+
+UCA repository
+~~~~~~~~~~~~~~
+
+If ``ceph_repository`` is set to ``uca``, packages will be installed by default from http://ubuntu-cloud.archive.canonical.com/ubuntu; this can be changed by tweaking ``ceph_stable_repo_uca``.
+You can also decide which OpenStack version the Ceph packages should come from by tweaking ``ceph_stable_openstack_release_uca``.
+For example, ``ceph_stable_openstack_release_uca: queens``.
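+
+A sketch combining these settings (the release value follows the example above):
+
+.. code-block:: yaml
+
+ ceph_origin: repository
+ ceph_repository: uca
+ ceph_stable_openstack_release_uca: queens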
+
+Dev repository
+~~~~~~~~~~~~~~
+
+If ``ceph_repository`` is set to ``dev``, packages will be installed by default from https://shaman.ceph.com/; this cannot be tweaked.
+You can decide which branch to install with the help of ``ceph_dev_branch`` (defaults to 'master').
+Additionally, you can specify a SHA1 with ``ceph_dev_sha1``, which defaults to 'latest' (as in the latest build).
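+
+A sketch for installing dev packages from shaman (the branch and SHA1 shown are the documented defaults):
+
+.. code-block:: yaml
+
+ ceph_origin: repository
+ ceph_repository: dev
+ ceph_dev_branch: master
+ ceph_dev_sha1: latest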
+
+Custom repository
+~~~~~~~~~~~~~~~~~
+
+If ``ceph_repository`` is set to ``custom``, packages will be installed from a repository of your choosing.
+This repository is specified with ``ceph_custom_repo``, e.g: ``ceph_custom_repo: https://server.domain.com/ceph-custom-repo``.
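+
+A sketch using a custom repository (the URL follows the example above):
+
+.. code-block:: yaml
+
+ ceph_origin: repository
+ ceph_repository: custom
+ ceph_custom_repo: https://server.domain.com/ceph-custom-repo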
+
+
+Origin: Distro
+--------------
+
+If ``ceph_origin`` is set to ``distro``, no separate repo file will be added and you will get whatever version of Ceph is included in your Linux distro.
+
+
+Origin: Local
+-------------
+
+If ``ceph_origin`` is set to ``local``, the ceph binaries will be copied over from the local machine (not well tested, use at your own risk)
--- /dev/null
+OSD Scenario
+============
+
+As of stable-4.0, the following scenarios are not supported anymore since they are associated with ``ceph-disk``:
+
+* `collocated`
+* `non-collocated`
+
+Since the Ceph luminous release, it is preferred to use the :ref:`lvm scenario
+<osd_scenario_lvm>` that uses the ``ceph-volume`` provisioning tool. Any other
+scenario will cause deprecation warnings.
+
+``ceph-disk`` was deprecated during the ceph-ansible 3.2 cycle and has been removed entirely from Ceph itself in the Nautilus version.
+At present (starting from stable-4.0), there is only one scenario, which defaults to ``lvm``, see:
+
+* :ref:`lvm <osd_scenario_lvm>`
+
+So there is no need to configure ``osd_scenario`` anymore, it defaults to ``lvm``.
+
+The ``lvm`` scenario mentioned above supports both containerized and non-containerized clusters.
+As a reminder, deploying a containerized cluster can be done by setting ``containerized_deployment``
+to ``True``.
+
+If you want to skip OSD creation during a ``ceph-ansible`` run
+(e.g. because you have already provisioned your OSDs but disk IDs have
+changed), you can skip the ``prepare_osd`` tag i.e. by specifying
+``--skip-tags prepare_osd`` on the ``ansible-playbook`` command line.
+
+.. _osd_scenario_lvm:
+
+lvm
+---
+
+This OSD scenario uses ``ceph-volume`` to create OSDs, primarily using LVM, and
+is only available when the Ceph release is luminous or newer.
+It is automatically enabled.
+
+Other (optional) supported settings:
+
+- ``osd_objectstore``: Set the Ceph *objectstore* for the OSD. Available options
+ are ``filestore`` or ``bluestore``. You can only select ``bluestore`` when
+ the Ceph release is luminous or greater. Defaults to ``bluestore`` if unset.
+
+- ``dmcrypt``: Enable Ceph's encryption on OSDs using ``dmcrypt``.
+ Defaults to ``false`` if unset.
+
+- ``osds_per_device``: Provision more than 1 OSD (the default if unset) per device.
+
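+A minimal sketch combining these optional settings, e.g. in ``group_vars/osds.yml`` (the values are illustrative):
+
+.. code-block:: yaml
+
+ osd_objectstore: bluestore
+ dmcrypt: true
+ osds_per_device: 2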
+
+Simple configuration
+^^^^^^^^^^^^^^^^^^^^
+
+With this approach, most of the decisions on how devices are configured to
+provision an OSD are made by the Ceph tooling (``ceph-volume lvm batch`` in
+this case). There is almost no room to modify how the OSD is composed given an
+input of devices.
+
+To use this configuration, the ``devices`` option must be populated with the
+raw device paths that will be used to provision the OSDs.
+
+
+.. note:: Raw devices must be "clean", without a GPT partition table or
+ logical volumes present.
+
+
+For example, for a node that has ``/dev/sda`` and ``/dev/sdb`` intended for
+Ceph usage, the configuration would be:
+
+
+.. code-block:: yaml
+
+ devices:
+ - /dev/sda
+ - /dev/sdb
+
+In the above case, if both devices are spinning drives, 2 OSDs would be
+created, each with its own collocated journal.
+
+Other provisioning strategies are possible, by mixing spinning and solid state
+devices, for example:
+
+.. code-block:: yaml
+
+ devices:
+ - /dev/sda
+ - /dev/sdb
+ - /dev/nvme0n1
+
+Similar to the initial example, this would end up producing 2 OSDs, but data
+would be placed on the slower spinning drives (``/dev/sda`` and ``/dev/sdb``)
+and journals would be placed on the faster solid state device ``/dev/nvme0n1``.
+The ``ceph-volume`` tool describes this in detail in
+`the "batch" subcommand section <http://docs.ceph.com/docs/master/ceph-volume/lvm/batch/>`_.
+
+This option can also be used with ``osd_auto_discovery``, meaning that you do not need to populate
+``devices`` directly and any appropriate devices found by ansible will be used instead.
+
+.. code-block:: yaml
+
+ osd_auto_discovery: true
+
+Other (optional) supported settings:
+
+- ``crush_device_class``: Sets the CRUSH device class for all OSDs created with this
+  method (it is not possible to have a per-OSD CRUSH device class using the *simple*
+  configuration approach). The value *must be* a string, like
+  ``crush_device_class: "ssd"``.
+
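+For example, to assign the ``ssd`` device class to every OSD created from the ``devices`` list (device paths are illustrative):
+
+.. code-block:: yaml
+
+   crush_device_class: "ssd"
+   devices:
+     - /dev/sda
+     - /dev/sdb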
+
+Advanced configuration
+^^^^^^^^^^^^^^^^^^^^^^
+
+This configuration is useful when you want more granular control over how
+devices are set up and arranged to provision an OSD. It requires an
+existing setup of volume groups and logical volumes (``ceph-volume`` will **not**
+create these).
+
+To use this configuration, the ``lvm_volumes`` option must be populated with
+logical volumes and volume groups. Additionally, absolute paths to partitions
+*can* be used for ``journal``, ``block.db``, and ``block.wal``.
+
+.. note:: This configuration uses ``ceph-volume lvm create`` to provision OSDs
+
+Supported ``lvm_volumes`` configuration settings:
+
+- ``data``: The logical volume name or full path to a raw device (an LV will be
+ created using 100% of the raw device)
+
+- ``data_vg``: The volume group name, **required** if ``data`` is a logical volume.
+
+- ``crush_device_class``: CRUSH device class name for the resulting OSD; allows
+  setting the device class for each OSD, unlike the global ``crush_device_class``
+  that sets it for all OSDs.
+
+.. note:: If you wish to set the ``crush_device_class`` for the OSDs
+ when using ``devices`` you must set it using the global ``crush_device_class``
+ option as shown above. There is no way to define a specific CRUSH device class
+ per OSD when using ``devices`` like there is for ``lvm_volumes``.
+
+
+``filestore`` objectstore variables:
+
+- ``journal``: The logical volume name or full path to a partition.
+
+- ``journal_vg``: The volume group name, **required** if ``journal`` is a logical volume.
+
+.. warning:: Each entry must be unique; duplicate values are not allowed.
+
+
+``bluestore`` objectstore variables:
+
+- ``db``: The logical volume name or full path to a partition.
+
+- ``db_vg``: The volume group name, **required** if ``db`` is a logical volume.
+
+- ``wal``: The logical volume name or full path to a partition.
+
+- ``wal_vg``: The volume group name, **required** if ``wal`` is a logical volume.
+
+
+.. note:: These ``bluestore`` variables are optional optimizations. Bluestore's
+ ``db`` and ``wal`` will only benefit from faster devices. It is possible to
+ create a bluestore OSD with a single raw device.
+
+.. warning:: Each entry must be unique; duplicate values are not allowed.
+
+
+``bluestore`` example using raw devices:
+
+.. code-block:: yaml
+
+ osd_objectstore: bluestore
+ lvm_volumes:
+ - data: /dev/sda
+ - data: /dev/sdb
+
+.. note:: Volume groups and logical volumes will be created in this case,
+ utilizing 100% of the devices.
+
+``bluestore`` example with logical volumes:
+
+.. code-block:: yaml
+
+ osd_objectstore: bluestore
+ lvm_volumes:
+ - data: data-lv1
+ data_vg: data-vg1
+ - data: data-lv2
+ data_vg: data-vg2
+
+.. note:: Volume groups and logical volumes must exist.
+
+
+``bluestore`` example defining ``wal`` and ``db`` logical volumes:
+
+.. code-block:: yaml
+
+ osd_objectstore: bluestore
+ lvm_volumes:
+ - data: data-lv1
+ data_vg: data-vg1
+ db: db-lv1
+ db_vg: db-vg1
+ wal: wal-lv1
+ wal_vg: wal-vg1
+ - data: data-lv2
+ data_vg: data-vg2
+ db: db-lv2
+ db_vg: db-vg2
+ wal: wal-lv2
+ wal_vg: wal-vg2
+
+.. note:: Volume groups and logical volumes must exist.
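+
+Since absolute partition paths are also accepted for ``db`` and ``wal``, a variant of the example above could place them directly on partitions of a faster device (the partition names below are illustrative):
+
+.. code-block:: yaml
+
+   osd_objectstore: bluestore
+   lvm_volumes:
+     - data: data-lv1
+       data_vg: data-vg1
+       db: /dev/nvme0n1p1
+       wal: /dev/nvme0n1p2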
+
+
+``filestore`` example with logical volumes:
+
+.. code-block:: yaml
+
+ osd_objectstore: filestore
+ lvm_volumes:
+ - data: data-lv1
+ data_vg: data-vg1
+ journal: journal-lv1
+ journal_vg: journal-vg1
+ - data: data-lv2
+ data_vg: data-vg2
+ journal: journal-lv2
+ journal_vg: journal-vg2
+
+.. note:: Volume groups and logical volumes must exist.
--- /dev/null
+RBD Mirroring
+=============
+
+There is not much to do on the primary cluster side in order to set up RBD mirror replication.
+``ceph_rbd_mirror_configure`` has to be set to ``true`` to make ceph-ansible create the mirrored pool
+defined in ``ceph_rbd_mirror_pool`` and the keyring that is going to be used to add the rbd mirror peer.
+
+group_vars from the primary cluster:
+
+.. code-block:: yaml
+
+ ceph_rbd_mirror_configure: true
+ ceph_rbd_mirror_pool: rbd
+
+Optionally, you can tell ceph-ansible to set the name and the secret of the keyring you want to create:
+
+.. code-block:: yaml
+
+ ceph_rbd_mirror_local_user: client.rbd-mirror-peer # 'client.rbd-mirror-peer' is the default value.
+ ceph_rbd_mirror_local_user_secret: AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw==
+
+This secret will be needed to add the rbd mirror peer from the secondary cluster.
+If you do not set it as shown above, you can retrieve it from a monitor by running
+``ceph auth get {{ ceph_rbd_mirror_local_user }}``:
+
+
+.. code-block:: shell
+
+ $ sudo ceph auth get client.rbd-mirror-peer
+
+Once your variables are defined, you can run the playbook (you might want to use the ``--limit`` option):
+
+.. code-block:: shell
+
+ $ ansible-playbook -vv -i hosts site-container.yml --limit rbdmirror0
+
+
+Strictly speaking, the configuration of the RBD mirror replication is done on the secondary cluster:
+the rbd-mirror daemon pulls the data from the primary cluster, so this is where the rbd mirror peer has to be added.
+The configuration is similar to what was done on the primary cluster; it just needs a few additional variables.
+
+* ``ceph_rbd_mirror_remote_user``: This user must match the name defined in the variable ``ceph_rbd_mirror_local_user`` from the primary cluster.
+* ``ceph_rbd_mirror_remote_mon_hosts``: This must be a comma-separated list of the monitor addresses from the primary cluster.
+* ``ceph_rbd_mirror_remote_key``: This must be the same value as the user (``{{ ceph_rbd_mirror_local_user }}``) keyring secret from the primary cluster.
+
+group_vars from the secondary cluster:
+
+.. code-block:: yaml
+
+ ceph_rbd_mirror_configure: true
+ ceph_rbd_mirror_pool: rbd
+ ceph_rbd_mirror_remote_user: client.rbd-mirror-peer # This must match the value defined in {{ ceph_rbd_mirror_local_user }} on primary cluster.
+ ceph_rbd_mirror_remote_mon_hosts: 1.2.3.4
+ ceph_rbd_mirror_remote_key: AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== # This must match the secret of the registered keyring of the user defined in {{ ceph_rbd_mirror_local_user }} on primary cluster.
+
+Once your variables are defined, you can run the playbook (you might want to use the ``--limit`` option):
+
+.. code-block:: shell
+
+ $ ansible-playbook -vv -i hosts site-container.yml --limit rbdmirror0
\ No newline at end of file
--- /dev/null
+.. _development:
+
+ceph-ansible testing for development
+====================================
--- /dev/null
+Glossary
+========
+
+.. toctree::
+ :maxdepth: 1
+
+ index
+ running.rst
+ development.rst
+ scenarios.rst
+ modifying.rst
+ layout.rst
+ tests.rst
+ tox.rst
--- /dev/null
+.. _testing:
+
+Testing
+=======
+
+``ceph-ansible`` has the ability to test different scenarios (collocated journals
+or dmcrypt OSDs for example) in an isolated, repeatable, and easy way.
+
+These tests can run locally with VirtualBox or via libvirt if available, which
+removes the need to rely solely on a CI system like Jenkins to verify
+behavior.
+
+* **Getting started:**
+
+ * :doc:`Running a Test Scenario <running>`
+ * :ref:`dependencies`
+
+* **Configuration and structure:**
+
+ * :ref:`layout`
+ * :ref:`test_files`
+ * :ref:`scenario_files`
+ * :ref:`scenario_wiring`
+
+* **Adding or modifying tests:**
+
+ * :ref:`test_conventions`
+ * :ref:`testinfra`
+
+* **Adding or modifying a scenario:**
+
+ * :ref:`scenario_conventions`
+ * :ref:`scenario_environment_configuration`
+ * :ref:`scenario_ansible_configuration`
+
+* **Custom/development repositories and packages:**
+
+ * :ref:`tox_environment_variables`
--- /dev/null
+.. _layout:
+
+Layout and conventions
+----------------------
+
+Test files and directories follow a few conventions, which makes it easy to
+create (or expect) certain interactions between tests and scenarios.
+
+All tests are in the ``tests`` directory. Scenarios are defined in
+``tests/functional/`` and use the following convention for directory
+structure:
+
+.. code-block:: none
+
+ tests/functional/<distro>/<distro version>/<scenario name>/
+
+For example: ``tests/functional/centos/7/journal-collocation``
+
+Within a test scenario there are a few files that define what that specific
+scenario needs for the tests, like how many OSD nodes or MON nodes.
+
+At the very least, a scenario will need these files:
+
+* ``Vagrantfile``: must be symlinked from the root directory of the project
+* ``hosts``: An Ansible hosts file that defines the machines part of the
+ cluster
+* ``group_vars/all``: any modifications needed for the deployment are
+  overridden here. Additionally, further customizations can be added; for
+  example, for OSDs that would mean adding ``group_vars/osds``
+* ``vagrant_variables.yml``: Defines the actual environment for the test, where
+  machines, networks, disks, and Linux distro/version can be defined.
+
+
+.. _test_conventions:
+
+Conventions
+-----------
+
+Python test files (unlike scenarios) rely on paths to *map* where they belong. For
+example, a file that should only test monitor nodes would live in
+``ceph-ansible/tests/functional/tests/mon/``. Internally, the test runner
+(``py.test``) will *mark* these as tests that should run on a monitor only.
+Since the configuration of a scenario already defines what node has a given
+role, it is easier for the system to only run tests that belong to
+a particular node type.
+
+The current convention is a bit manual, with initial path support for:
+
+* mon
+* osd
+* mds
+* rgw
+* journal_collocation
+* all/any (if none of the above are matched, then these are run on any host)
+
+
+.. _testinfra:
+
+``testinfra``
+-------------
--- /dev/null
+.. _modifying:
+
+Modifying (or adding) tests
+===========================
--- /dev/null
+.. _running_tests:
+
+Running Tests
+=============
+
+Although tests run continuously in CI, a lot of effort was put into making them
+easy to run in any environment, as long as a couple of requirements are met.
+
+
+.. _dependencies:
+
+Dependencies
+------------
+
+There are some Python dependencies, which are listed in a ``requirements.txt``
+file within the ``tests/`` directory. These are meant to be installed using
+Python install tools (pip in this case):
+
+.. code-block:: console
+
+ pip install -r tests/requirements.txt
+
+For virtualization, either libvirt or VirtualBox is needed (there is native
+support from the harness for both). This makes the test harness even more
+flexible as most platforms will be covered by either VirtualBox or libvirt.
+
+
+.. _running_a_scenario:
+
+Running a scenario
+------------------
+
+Tests are driven by ``tox``, a command line tool to run a matrix of tests defined in
+a configuration file (in this case the ``tox.ini`` at the root of the project).
+
+For a thorough description of a scenario see :ref:`test_scenarios`.
+
+To run a single scenario, make sure it is available (it should be defined in
+``tox.ini``) by listing the scenarios:
+
+.. code-block:: console
+
+ tox -l
+
+In this example, we will use the ``luminous-ansible2.4-xenial_cluster`` scenario. The
+harness defaults to ``VirtualBox`` as the backend, so if you have that
+installed on your system then this command should just work:
+
+.. code-block:: console
+
+ tox -e luminous-ansible2.4-xenial_cluster
+
+And for libvirt it would be:
+
+.. code-block:: console
+
+ tox -e luminous-ansible2.4-xenial_cluster -- --provider=libvirt
+
+.. warning::
+
+ Depending on the type of scenario and resources available, running
+ these tests locally in a personal computer can be very resource intensive.
+
+.. note::
+
+ Most test runs take between 20 and 40 minutes depending on system
+ resources
+
+The command should bring up the machines needed for the test, provision them
+with ``ceph-ansible``, run the tests, and tear the whole environment down at the
+end.
+
+
+The output would look similar to this trimmed version:
+
+.. code-block:: console
+
+ luminous-ansible2.4-xenial_cluster create: /Users/alfredo/python/upstream/ceph-ansible/.tox/luminous-ansible2.4-xenial_cluster
+ luminous-ansible2.4-xenial_cluster installdeps: ansible==2.4.2, -r/Users/alfredo/python/upstream/ceph-ansible/tests/requirements.txt
+ luminous-ansible2.4-xenial_cluster runtests: commands[0] | vagrant up --no-provision --provider=virtualbox
+ Bringing machine 'client0' up with 'virtualbox' provider...
+ Bringing machine 'rgw0' up with 'virtualbox' provider...
+ Bringing machine 'mds0' up with 'virtualbox' provider...
+ Bringing machine 'mon0' up with 'virtualbox' provider...
+ Bringing machine 'mon1' up with 'virtualbox' provider...
+ Bringing machine 'mon2' up with 'virtualbox' provider...
+ Bringing machine 'osd0' up with 'virtualbox' provider...
+ ...
+
+
+After all the nodes are up, ``ceph-ansible`` will provision them, and run the
+playbook(s):
+
+.. code-block:: console
+
+ ...
+ PLAY RECAP *********************************************************************
+ client0 : ok=4 changed=0 unreachable=0 failed=0
+ mds0 : ok=4 changed=0 unreachable=0 failed=0
+ mon0 : ok=4 changed=0 unreachable=0 failed=0
+ mon1 : ok=4 changed=0 unreachable=0 failed=0
+ mon2 : ok=4 changed=0 unreachable=0 failed=0
+ osd0 : ok=4 changed=0 unreachable=0 failed=0
+ rgw0 : ok=4 changed=0 unreachable=0 failed=0
+ ...
+
+
+Once the whole environment is running, the tests will be sent out to the
+hosts, with output similar to this:
+
+.. code-block:: console
+
+ luminous-ansible2.4-xenial_cluster runtests: commands[4] | testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory=/Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster/hosts /Users/alfredo/python/upstream/ceph-ansible/tests/functional/tests
+ ============================ test session starts ===========================
+ platform darwin -- Python 2.7.8, pytest-3.0.7, py-1.4.33, pluggy-0.4.0 -- /Users/alfredo/python/upstream/ceph-ansible/.tox/luminous-ansible2.4-xenial_cluster/bin/python
+ cachedir: ../../../../.cache
+ rootdir: /Users/alfredo/python/upstream/ceph-ansible/tests, inifile: pytest.ini
+ plugins: testinfra-1.5.4, xdist-1.15.0
+ [gw0] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster
+ [gw1] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster
+ [gw2] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster
+ [gw3] darwin Python 2.7.8 cwd: /Users/alfredo/python/upstream/ceph-ansible/tests/functional/ubuntu/16.04/cluster
+ [gw0] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)]
+ [gw1] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)]
+ [gw2] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)]
+ [gw3] Python 2.7.8 (v2.7.8:ee879c0ffa11, Jun 29 2014, 21:07:35) -- [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)]
+ gw0 [154] / gw1 [154] / gw2 [154] / gw3 [154]
+ scheduling tests via LoadScheduling
+
+ ../../../tests/test_install.py::TestInstall::test_ceph_dir_exists[ansible:/mon0]
+ ../../../tests/test_install.py::TestInstall::test_ceph_dir_is_a_directory[ansible:/mon0]
+ ../../../tests/test_install.py::TestInstall::test_ceph_conf_is_a_file[ansible:/mon0]
+ ../../../tests/test_install.py::TestInstall::test_ceph_dir_is_a_directory[ansible:/mon1]
+ [gw2] PASSED ../../../tests/test_install.py::TestCephConf::test_ceph_config_has_mon_host_line[ansible:/mon0]
+ ../../../tests/test_install.py::TestInstall::test_ceph_conf_exists[ansible:/mon1]
+ [gw3] PASSED ../../../tests/test_install.py::TestCephConf::test_mon_host_line_has_correct_value[ansible:/mon0]
+ ../../../tests/test_install.py::TestInstall::test_ceph_conf_is_a_file[ansible:/mon1]
+ [gw1] PASSED ../../../tests/test_install.py::TestInstall::test_ceph_command_exists[ansible:/mon1]
+ ../../../tests/test_install.py::TestCephConf::test_mon_host_line_has_correct_value[ansible:/mon1]
+ ...
+
+Finally the whole environment gets torn down:
+
+.. code-block:: console
+
+ luminous-ansible2.4-xenial_cluster runtests: commands[5] | vagrant destroy --force
+ ==> osd0: Forcing shutdown of VM...
+ ==> osd0: Destroying VM and associated drives...
+ ==> mon2: Forcing shutdown of VM...
+ ==> mon2: Destroying VM and associated drives...
+ ==> mon1: Forcing shutdown of VM...
+ ==> mon1: Destroying VM and associated drives...
+ ==> mon0: Forcing shutdown of VM...
+ ==> mon0: Destroying VM and associated drives...
+ ==> mds0: Forcing shutdown of VM...
+ ==> mds0: Destroying VM and associated drives...
+ ==> rgw0: Forcing shutdown of VM...
+ ==> rgw0: Destroying VM and associated drives...
+ ==> client0: Forcing shutdown of VM...
+ ==> client0: Destroying VM and associated drives...
+
+
+And a brief summary of the scenario(s) that ran is displayed:
+
+.. code-block:: console
+
+ ________________________________________________ summary _________________________________________________
+ luminous-ansible2.4-xenial_cluster: commands succeeded
+ congratulations :)
--- /dev/null
+.. _test_scenarios:
+
+Test Scenarios
+==============
+
+Scenarios are distinct environments that describe a Ceph deployment and
+configuration. Scenarios are isolated as well, and define what machines are
+needed aside from any ``ceph-ansible`` configuration.
+
+.. _scenario_files:
+
+Scenario Files
+==============
+
+The scenario is described in a ``vagrant_variables.yml`` file, which is
+consumed by ``Vagrant`` when bringing up an environment.
+
+This yaml file is loaded in the ``Vagrantfile`` so that the settings can be
+used to bring up the boxes and pass some configuration to ansible when running.
+
+.. note::
+
+    The basic layout of a scenario is covered in :ref:`layout`.
+
+There are just a handful of required files; these sections will cover the
+required (most basic) ones. Alternatively, other ``ceph-ansible`` files can be
+added to customize the behavior of a scenario deployment.
+
+
+.. _vagrant_variables:
+
+``vagrant_variables.yml``
+-------------------------
+
+There are a few sections in the ``vagrant_variables.yml`` file which are easy
+to follow (most of them are one-line settings).
+
+* **docker**: (bool) Indicates if the scenario will deploy Docker daemons
+
+* **VMS**: (int) These integer values are just a count of how many machines will be
+ needed. Each supported type is listed, defaulting to 0:
+
+ .. code-block:: yaml
+
+ mon_vms: 0
+ osd_vms: 0
+ mds_vms: 0
+ rgw_vms: 0
+ nfs_vms: 0
+ rbd_mirror_vms: 0
+ client_vms: 0
+ iscsi_gw_vms: 0
+ mgr_vms: 0
+
+ For a deployment that needs 1 MON and 1 OSD, the list would look like:
+
+ .. code-block:: yaml
+
+ mon_vms: 1
+ osd_vms: 1
+
+* **CEPH SOURCE**: (string) indicates whether a ``dev`` or ``stable`` release is
+  needed. A ``stable`` release will use the latest stable release of Ceph,
+  while ``dev`` will use ``shaman`` (http://shaman.ceph.com).
+
+* **SUBNETS**: These are used for configuring the network availability of each
+  server that will be booted, as well as serving as configuration for
+  ``ceph-ansible`` (and eventually Ceph). The two **required** values are:
+
+ .. code-block:: yaml
+
+ public_subnet: 192.168.13
+ cluster_subnet: 192.168.14
+
+* **MEMORY**: Memory requirements (in megabytes) for each server, e.g.
+ ``memory: 512``
+
+* **interfaces**: some vagrant boxes (and Linux distros) set specific
+  interfaces. For Ubuntu releases older than Xenial it was common to have
+  ``eth1``; for CentOS and some Xenial boxes, ``enp0s8`` is used. **However**
+  the public Vagrant boxes normalize the interface to ``eth1`` for all boxes,
+  making it easier to configure them with Ansible later.
+
+.. warning::
+
+ Do *not* change the interface from ``eth1`` unless absolutely
+ certain that is needed for a box. Some tests that depend on that
+ naming will fail.
+
+* **disks**: The disks that will be created for each machine. For most
+  environments, ``/dev/sd*``-style disks will work, like ``[ '/dev/sda', '/dev/sdb' ]``.
+
+* **vagrant_box**: We have published our own boxes to normalize what we test
+  against. These boxes are published in Atlas
+  (https://atlas.hashicorp.com/ceph/). Currently valid values are
+  ``ceph/ubuntu-xenial`` and ``ceph/centos7``.
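+
+Putting a few of these one-line settings together, a fragment of ``vagrant_variables.yml`` might look like this (values are illustrative):
+
+.. code-block:: yaml
+
+   memory: 1024
+   disks: [ '/dev/sda', '/dev/sdb' ]
+   vagrant_box: ceph/ubuntu-xenial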
+
+The following aren't usually changed/enabled for tests, since they don't have
+an impact; however, they are documented here for general knowledge in case they
+are needed:
+
+* **ssh_private_key_path**: The path to the ``id_rsa`` (or other private SSH
+ key) that should be used to connect to these boxes.
+
+* **vagrant_sync_dir**: what should be "synced" (made available on the new
+ servers) from the host.
+
+* **vagrant_disable_synced_folder**: (bool) when disabled, it will make
+ booting machines faster because no files need to be synced over.
+
+* **os_tuning_params**: These are passed on to ``ceph-ansible`` as part of the
+  variables for "system tuning". These shouldn't be changed.
+
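+A hedged sketch of these optional settings, should they ever be needed (values are illustrative):
+
+.. code-block:: yaml
+
+   ssh_private_key_path: "~/.ssh/id_rsa"
+   vagrant_sync_dir: /vagrant
+   vagrant_disable_synced_folder: true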
+
+.. _vagrant_file:
+
+``Vagrantfile``
+---------------
+
+The ``Vagrantfile`` should not need to change, and it is symlinked back to the
+``Vagrantfile`` that exists in the root of the project. It is linked in this
+way so that a vagrant environment can be isolated to the given scenario.
+
+
+.. _hosts_file:
+
+``hosts``
+---------
+
+The ``hosts`` file should contain the hosts needed for the scenario. This might
+seem a bit repetitive since machines are already defined in
+:ref:`vagrant_variables` but it allows granular changes to hosts (for example
+defining an interface vs. an IP on a monitor) which can help catch issues in
+``ceph-ansible`` configuration. For example:
+
+.. code-block:: ini
+
+ [mons]
+ mon0 monitor_address=192.168.5.10
+ mon1 monitor_address=192.168.5.11
+ mon2 monitor_interface=eth1
+
+.. _group_vars:
+
+``group_vars``
+--------------
+
+This directory holds any configuration change that will affect ``ceph-ansible``
+deployments in the same way as if ansible was executed from the root of the
+project.
+
+The file that always needs to be defined is ``all``, where (again) certain
+values like ``public_network`` and ``cluster_network`` need to be defined,
+along with any customizations that ``ceph-ansible`` supports.
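+
+A minimal sketch of such a ``group_vars/all`` file (the subnets are illustrative and should match the scenario's ``vagrant_variables.yml``):
+
+.. code-block:: yaml
+
+   public_network: "192.168.13.0/24"
+   cluster_network: "192.168.14.0/24"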
+
+
+.. _scenario_wiring:
+
+Scenario Wiring
+---------------
+
+Scenarios are just meant to provide the Ceph environment for testing, but they
+do need to be defined in the ``tox.ini`` so that they are available to the test
+framework. The following command (run from the root of the project) will list
+the available scenarios, shortened here for brevity:
+
+.. code-block:: console
+
+ $ tox -l
+ ...
+ luminous-ansible2.4-centos7_cluster
+ ...
+
+These scenario names are made from different variables; in the above example there
+are three:
+
+* ``luminous``: the Ceph version to test
+* ``ansible2.4``: the Ansible version to install
+* ``centos7_cluster``: the name of the scenario
+
+The last one is important in the *wiring up* of the scenario. It is a variable
+that defines in what path the scenario lives. For example, the
+``changedir`` section for ``centos7_cluster`` looks like:
+
+.. code-block:: ini
+
+ centos7_cluster: {toxinidir}/tests/functional/centos/7/cluster
+
+The actual tests are written for specific daemon types, for all daemon types,
+and for specific use cases (e.g. journal collocation); those have their own
+conventions as well, which are explained in detail in :ref:`test_conventions`
+and :ref:`test_files`.
+
+As long as a test scenario defines OSDs and MONs, the OSD tests and MON tests
+will run.
+
+
+.. _scenario_conventions:
+
+Conventions
+-----------
+
+.. _scenario_environment_configuration:
+
+Environment configuration
+-------------------------
+
+.. _scenario_ansible_configuration:
+
+Ansible configuration
+---------------------
--- /dev/null
+.. _tests:
+
+Tests
+=====
+
+Actual tests are written in Python methods that accept optional fixtures. These
+fixtures come with interesting attributes to help with remote assertions.
+
+As described in :ref:`test_conventions`, tests need to go into
+``tests/functional/tests/``. These are collected and *mapped* to a distinct
+node type, or *mapped* to run on all nodes.
+
+Simple Python asserts are used (these tests do not need to follow the Python
+``unittest.TestCase`` base class), which makes it easier to reason about failures
+and errors.
+
+The test run is handled by ``py.test`` along with :ref:`testinfra` for handling
+remote execution.
+
+
+.. _test_files:
+
+Test Files
+----------
+
+
+
+.. _test_fixtures:
+
+Test Fixtures
+=============
+
+Test fixtures are a powerful feature of ``py.test`` and most tests depend on
+them for making assertions about remote nodes. To request a fixture in a test
+method, all that is needed is to require it as an argument.
+
+Fixtures are detected by name, so as long as the argument being used has the
+same name, the fixture will be passed in (see `pytest fixtures`_ for more
+in-depth examples). The code that follows shows a test method that will use the
+``node`` fixture that contains useful information about a node in a ceph
+cluster:
+
+.. code-block:: python
+
+ def test_ceph_conf(self, node):
+ assert node['conf_path'] == "/etc/ceph/ceph.conf"
+
+The test is naive (the configuration path might not exist remotely) but
+explains how simple it is to "request" a fixture.
+
+For remote execution, we can rely further on other fixtures (tests can have as
+many fixtures as needed) like ``File``:
+
+.. code-block:: python
+
+ def test_ceph_config_has_inital_members_line(self, node, File):
+ assert File(node["conf_path"]).contains("^mon initial members = .*$")
+
+
+.. _node:
+
+``node`` fixture
+----------------
+
+The ``node`` fixture contains a few useful pieces of information about the node
+where the test is being executed; this is captured once, before tests run:
+
+* ``address``: The IP for the ``eth1`` interface
+* ``subnet``: The subnet that ``address`` belongs to
+* ``vars``: all the Ansible vars set for the current run
+* ``osd_ids``: a list of all the OSD IDs
+* ``num_mons``: the total number of monitors for the current environment
+* ``num_devices``: the number of devices for the current node
+* ``num_osd_hosts``: the total number of OSD hosts
+* ``total_osds``: total number of OSDs on the current node
+* ``cluster_name``: the name of the Ceph cluster (which defaults to 'ceph')
+* ``conf_path``: since the cluster name can change the file path for the Ceph
+  configuration, this gets set according to the cluster name.
+* ``cluster_address``: the address used for cluster communication. All
+ environments are set up with 2 interfaces, 1 being used exclusively for the
+ cluster
+* ``docker``: A boolean that identifies a Ceph Docker cluster
+* ``osds``: A list of OSD IDs, unless it is a Docker cluster, where it gets the
+ name of the devices (e.g. ``sda1``)
+
+
+Other Fixtures
+--------------
+
+There are a lot of other fixtures provided by :ref:`testinfra` as well as
+``py.test``. The full list of ``testinfra`` fixtures is available in
+`testinfra_fixtures`_.
+
+``py.test`` builtin fixtures can be listed with ``pytest -q --fixtures`` and
+they are described in `pytest builtin fixtures`_.
+
+.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
+.. _pytest builtin fixtures: https://docs.pytest.org/en/latest/builtin.html#builtin-fixtures-function-arguments
+.. _testinfra_fixtures: https://testinfra.readthedocs.io/en/latest/modules.html#modules
--- /dev/null
+.. _tox:
+
+``tox``
+=======
+
+``tox`` is an automation project we use to run our testing scenarios. It gives us
+the ability to create a dynamic matrix of many testing scenarios and isolated testing
+environments, and provides a single entry point to run all tests in an automated and repeatable fashion.
+
+Documentation for tox can be found `here <https://tox.readthedocs.io/en/latest/>`_.
+
+
+.. _tox_environment_variables:
+
+Environment variables
+---------------------
+
+When running ``tox`` we allow the use of environment variables to tweak certain settings
+of the playbook run using Ansible's ``--extra-vars``. This is helpful in Jenkins jobs or for manual test
+runs of ``ceph-ansible``.
+
+The following environment variables are available for use:
+
+* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.ceph.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
+
+* ``CEPH_DOCKER_IMAGE``: (default: ``ceph-ci/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
+
+* ``CEPH_DOCKER_IMAGE_TAG``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image_tag``.
+
+* ``CEPH_DEV_BRANCH``: (default: ``master``) This would configure the ``ceph-ansible`` variable ``ceph_dev_branch`` which defines which branch we'd
+ like to install from shaman.ceph.com.
+
+* ``CEPH_DEV_SHA1``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_dev_sha1`` which defines which sha1 we'd like
+ to install from shaman.ceph.com.
+
+* ``UPDATE_CEPH_DEV_BRANCH``: (default: ``master``) This would configure the ``ceph-ansible`` variable ``ceph_dev_branch`` which defines which branch we'd
+ like to update to from shaman.ceph.com.
+
+* ``UPDATE_CEPH_DEV_SHA1``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_dev_sha1`` which defines which sha1 we'd like
+ to update to from shaman.ceph.com.
+
+
+.. _tox_sections:
+
+Sections
+--------
+
+The ``tox.ini`` file has a number of top level sections defined by ``[ ]`` and subsections within those. For complete documentation
+on all subsections inside of a tox section please refer to the tox documentation.
+
+* ``tox`` : This section contains the ``envlist`` which is used to create our dynamic matrix. Refer to the `section here <http://tox.readthedocs.io/en/latest/config.html#generating-environments-conditional-settings>`_ for more information on how the ``envlist`` works.
+
+* ``purge`` : This section contains commands that only run for scenarios that purge the cluster and redeploy. You'll see this section being reused in ``testenv``
+ with the following syntax: ``{[purge]commands}``
+
+* ``update`` : This section contains commands that only run for scenarios that deploy a cluster and then upgrade it to another Ceph version.
+
+* ``testenv`` : This is the main section of the ``tox.ini`` file and is run on every scenario. This section contains many *factors* that define conditional
+  settings depending on the scenarios defined in the ``envlist``. For example, the factor ``centos7_cluster`` in the ``changedir`` subsection of ``testenv`` sets
+  the directory that tox will change to when that factor is selected. This is an important behavior that allows us to use the same ``tox.ini`` and reuse commands while
+  tweaking certain sections per testing scenario.
+
+
+.. _tox_environments:
+
+Modifying or Adding environments
+--------------------------------
+
+The tox environments are controlled by the ``envlist`` subsection of the ``[tox]`` section. Anything inside of ``{}`` is considered a *factor* and will be included
+in the dynamic matrix that tox creates. Inside of ``{}`` you can include a comma separated list of the *factors*. Do not use a hyphen (``-``) as part
+of the *factor* name as those are used by tox as the separator between different factor sets.
+
+For example, if you wanted to add a new test *factor* for the next Ceph release, luminous, this is how you'd accomplish that. Currently, the first factor set in our ``envlist``
+is used to define the Ceph release (``{jewel,kraken,rhcs}-...``). To add luminous you'd change that to look like ``{luminous,kraken,rhcs}-...``. In the ``testenv`` section
+there is a subsection called ``setenv`` which allows you to provide environment variables to the tox environment; we support an environment variable called ``CEPH_STABLE_RELEASE``. To ensure that all the new tests created by adding the luminous *factor* use the correct release, you'd add this to that section: ``luminous: CEPH_STABLE_RELEASE=luminous``.
--- /dev/null
+[tox]
+envlist = docs
+skipsdist = True
+
+[testenv:docs]
+basepython=python
+changedir=source
+deps=sphinx==1.7.9
+commands=
+ sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html
--- /dev/null
+# Dummy ansible host file
+# Used for syntax check by Travis
+# Before committing code please run: ansible-playbook --syntax-check site.yml -i dummy-ansible-hosts
+localhost
--- /dev/null
+#!/usr/bin/env bash
+set -euo pipefail
+
+
+#############
+# VARIABLES #
+#############
+
+basedir=$(dirname "$0")
+do_not_generate="(ceph-common|ceph-container-common|ceph-fetch-keys)$" # pipe separated list of roles we don't want to generate sample file, MUST end with '$', e.g: 'foo$|bar$'
+
+
+#############
+# FUNCTIONS #
+#############
+
+populate_header () {
+ for i in $output; do
+ cat <<EOF > "$basedir"/group_vars/"$i"
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by $(basename "$0")
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+EOF
+ done
+}
+
+generate_group_vars_file () {
+ for i in $output; do
+ if [ "$(uname)" == "Darwin" ]; then
+ sed '/^---/d; s/^\([A-Za-z[:space:]]\)/#\1/' \
+ "$defaults" >> "$basedir"/group_vars/"$i"
+ echo >> "$basedir"/group_vars/"$i"
+ elif [ "$(uname -s)" == "Linux" ]; then
+ sed '/^---/d; s/^\([A-Za-z[:space:]].\+\)/#\1/' \
+ "$defaults" >> "$basedir"/group_vars/"$i"
+ echo >> "$basedir"/group_vars/"$i"
+ else
+ echo "Unsupported platform"
+ exit 1
+ fi
+ done
+}
+
+rhcs_edits () {
+ tail -n +1 rhcs_edits.txt | while IFS= read -r option; do
+ sed -i "s|#${option% *} .*|${option}|" group_vars/rhcs.yml.sample
+ done
+}
+
+########
+# MAIN #
+########
+
+for role in "$basedir"/roles/ceph-*; do
+ rolename=$(basename "$role")
+
+ if [[ $rolename == "ceph-defaults" ]]; then
+ output="all.yml.sample rhcs.yml.sample"
+ elif [[ $rolename == "ceph-fetch-keys" ]]; then
+ output="ceph-fetch-keys.yml.sample"
+ elif [[ $rolename == "ceph-rbd-mirror" ]]; then
+ output="rbdmirrors.yml.sample"
+ elif [[ $rolename == "ceph-iscsi-gw" ]]; then
+ output="iscsigws.yml.sample"
+ elif [[ $rolename == "ceph-rgw-loadbalancer" ]]; then
+ output="rgwloadbalancers.yml.sample"
+ else
+ output="${rolename:5}s.yml.sample"
+ fi
+
+ defaults="$role"/defaults/main.yml
+ if [[ ! -f $defaults ]]; then
+ continue
+ fi
+
+ if ! echo "$rolename" | grep -qE "$do_not_generate"; then
+ populate_header
+ generate_group_vars_file
+ fi
+done
+
+rhcs_edits
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+######################################
+# Releases name to number dictionary #
+######################################
+#ceph_release_num:
+# dumpling: 0.67
+# emperor: 0.72
+# firefly: 0.80
+# giant: 0.87
+# hammer: 0.94
+# infernalis: 9
+# jewel: 10
+# kraken: 11
+# luminous: 12
+# mimic: 13
+# nautilus: 14
+# octopus: 15
+# pacific: 16
+# dev: 99
+
+
+# The 'cluster' variable determines the name of the cluster.
+# Changing the default value to something else means that you will
+# need to change all the command line calls as well, for example if
+# your cluster name is 'foo':
+# "ceph health" will become "ceph --cluster foo health"
+#
+# An easier way to handle this is to use the environment variable CEPH_ARGS
+# So run: export CEPH_ARGS="--cluster foo"
+# With that you will be able to run "ceph health" normally
+#cluster: ceph
+
+# Inventory host group variables
+#mon_group_name: mons
+#osd_group_name: osds
+#rgw_group_name: rgws
+#mds_group_name: mdss
+#nfs_group_name: nfss
+#rbdmirror_group_name: rbdmirrors
+#client_group_name: clients
+#iscsi_gw_group_name: iscsigws
+#mgr_group_name: mgrs
+#rgwloadbalancer_group_name: rgwloadbalancers
+#monitoring_group_name: monitoring
+#adopt_label_group_names:
+# - "{{ mon_group_name }}"
+# - "{{ osd_group_name }}"
+# - "{{ rgw_group_name }}"
+# - "{{ mds_group_name }}"
+# - "{{ nfs_group_name }}"
+# - "{{ rbdmirror_group_name }}"
+# - "{{ client_group_name }}"
+# - "{{ iscsi_gw_group_name }}"
+# - "{{ mgr_group_name }}"
+# - "{{ rgwloadbalancer_group_name }}"
+# - "{{ monitoring_group_name }}"
+
+# If configure_firewall is true, then ansible will try to configure the
+# appropriate firewalling rules so that Ceph daemons can communicate
+# with each other.
+#configure_firewall: True
+
+# Open ports on corresponding nodes if firewall is installed on it
+#ceph_mon_firewall_zone: public
+#ceph_mgr_firewall_zone: public
+#ceph_osd_firewall_zone: public
+#ceph_rgw_firewall_zone: public
+#ceph_mds_firewall_zone: public
+#ceph_nfs_firewall_zone: public
+#ceph_rbdmirror_firewall_zone: public
+#ceph_iscsi_firewall_zone: public
+#ceph_dashboard_firewall_zone: public
+#ceph_rgwloadbalancer_firewall_zone: public
+
+# cephadm account for remote connections
+#cephadm_ssh_user: root
+#cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa"
+#cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub"
+#cephadm_mgmt_network: "{{ public_network }}"
+
+############
+# PACKAGES #
+############
+#debian_package_dependencies: []
+
+#centos_package_dependencies:
+# - epel-release
+# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
+
+#redhat_package_dependencies: []
+
+#suse_package_dependencies: []
+
+# Whether or not to install the ceph-test package.
+#ceph_test: false
+
+# Enable the ntp service by default to avoid clock skew on ceph nodes
+# Disable if an appropriate NTP client is already installed and configured
+#ntp_service_enabled: true
+
+# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
+#ntp_daemon_type: chronyd
+
+# This variable determines if ceph packages can be updated. If False, the
+# package resources will use "state=present". If True, they will use
+# "state=latest".
+#upgrade_ceph_packages: False
+
+#ceph_use_distro_backports: false # DEBIAN ONLY
+#ceph_directories_mode: "0755"
+
+###########
+# INSTALL #
+###########
+# ORIGIN SOURCE
+#
+# Choose between:
+# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'rhcs', 'dev' or 'obs'
+# - 'distro' means that no separate repo file will be added
+# you will get whatever version of Ceph is included in your Linux distro.
+# - 'local' means that the ceph binaries will be copied over from the local machine
+#ceph_origin: dummy
+#valid_ceph_origins:
+# - repository
+# - distro
+# - local
+
+
+#ceph_repository: dummy
+#valid_ceph_repository:
+# - community
+# - rhcs
+# - dev
+# - uca
+# - custom
+# - obs
+
+
+# REPOSITORY: COMMUNITY VERSION
+#
+# Enabled when ceph_repository == 'community'
+#
+#ceph_mirror: https://download.ceph.com
+#ceph_stable_key: https://download.ceph.com/keys/release.asc
+#ceph_stable_release: pacific
+#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
+
+#nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+#nfs_ganesha_stable_branch: V3.5-stable
+#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-3.0/ubuntu
+#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
+#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
+#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-3.0/ubuntu
+
+# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
+# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
+# for more info read: https://github.com/ceph/ceph-ansible/issues/305
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+
+
+# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
+#
+# Enabled when ceph_repository == 'rhcs'
+#
+# This version is supported on RHEL 8
+#
+#ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(5) }}"
+
+
+# REPOSITORY: UBUNTU CLOUD ARCHIVE
+#
+# Enabled when ceph_repository == 'uca'
+#
+# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
+# usually has newer Ceph releases than the normal distro repository.
+#
+#
+#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+#ceph_stable_openstack_release_uca: queens
+#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
+
+# REPOSITORY: openSUSE OBS
+#
+# Enabled when ceph_repository == 'obs'
+#
+# This allows the install of Ceph from the openSUSE OBS repository. The OBS repository
+# usually has newer Ceph releases than the normal distro repository.
+#
+#
+#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
+
+# REPOSITORY: DEV
+#
+# Enabled when ceph_repository == 'dev'
+#
+#ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack
+#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
+
+#nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
+# flavors so far include: ceph_master, ceph_jewel, ceph_kraken, ceph_luminous
+#nfs_ganesha_flavor: "ceph_master"
+
+#ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways
+
+
+# REPOSITORY: CUSTOM
+#
+# Enabled when ceph_repository == 'custom'
+#
+# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
+# a URL to the .repo file to be installed on the targets. For deb,
+# ceph_custom_repo should be the URL to the repo base.
+#
+#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
+
+
+# ORIGIN: LOCAL CEPH INSTALLATION
+#
+# Enabled when ceph_repository == 'local'
+#
+# Path to DESTDIR of the ceph install
+#ceph_installation_dir: "/path/to/ceph_installation/"
+# Whether or not to use installer script rundep_installer.sh
+# This script takes in rundep and installs the packages line by line onto the machine
+# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
+# all runtime dependencies installed
+#use_installer: false
+# Root directory for ceph-ansible
+#ansible_dir: "/path/to/ceph-ansible"
+
+
+######################
+# CEPH CONFIGURATION #
+######################
+
+## Ceph options
+#
+# Each cluster requires a unique, consistent filesystem ID. By
+# default, the playbook generates one for you.
+# If you want to customize how the fsid is
+# generated, you may find it useful to disable fsid generation to
+# avoid cluttering up your ansible repo. If you set `generate_fsid` to
+# false, you *must* generate `fsid` in another way.
+# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
+#fsid: "{{ cluster_uuid.stdout }}"
+#generate_fsid: true
+
+#ceph_conf_key_directory: /etc/ceph
+
+#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
+
+# Permissions for keyring files in /etc/ceph
+#ceph_keyring_permissions: '0600'
+
+#cephx: true
+
+## Client options
+#
+#rbd_cache: "true"
+#rbd_cache_writethrough_until_flush: "true"
+#rbd_concurrent_management_ops: 20
+
+#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
+
+# Permissions for the rbd_client_log_path and
+# rbd_client_admin_socket_path. Depending on your use case for Ceph
+# you may want to change these values. The default, which is used if
+# any of the variables are unset or set to a false value (like `null`
+# or `false`) is to automatically determine what is appropriate for
+# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
+# for infernalis releases, and root:root and 1777 for pre-infernalis
+# releases.
+#
+# For other use cases, including running Ceph with OpenStack, you'll
+# want to set these differently:
+#
+# For OpenStack on RHEL, you'll want:
+# rbd_client_directory_owner: "qemu"
+# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
+# rbd_client_directory_mode: "0755"
+#
+# For OpenStack on Ubuntu or Debian, set:
+# rbd_client_directory_owner: "libvirt-qemu"
+# rbd_client_directory_group: "kvm"
+# rbd_client_directory_mode: "0755"
+#
+# If you set rbd_client_directory_mode, you must use a string (e.g.,
+# 'rbd_client_directory_mode: "0755"', *not*
+# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
+# must be in octal or symbolic form
+#rbd_client_directory_owner: ceph
+#rbd_client_directory_group: ceph
+#rbd_client_directory_mode: "0770"
+
+#rbd_client_log_path: /var/log/ceph
+#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
+#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
+
+## Monitor options
+#
+# You must define either monitor_interface, monitor_address or monitor_address_block.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# Eg. If you want to specify for each monitor which address the monitor will bind to you can set it in your **inventory host file** by using 'monitor_address' variable.
+# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
+#monitor_interface: interface
+#monitor_address: x.x.x.x
+#monitor_address_block: subnet
+# set to either ipv4 or ipv6, whichever your network is using
+#ip_version: ipv4
+
+#mon_host_v1:
+# enabled: True
+# suffix: ':6789'
+#mon_host_v2:
+# suffix: ':3300'
+
+#enable_ceph_volume_debug: False
+
+##########
+# CEPHFS #
+##########
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+# target_size_ratio: 0.2
+#cephfs: cephfs # name of the ceph filesystem
+#cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+#cephfs_metadata_pool:
+# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+#cephfs_pools:
+# - "{{ cephfs_data_pool }}"
+# - "{{ cephfs_metadata_pool }}"
+
+## OSD options
+#
+#lvmetad_disabled: false
+#is_hci: false
+#hci_safety_factor: 0.2
+#non_hci_safety_factor: 0.7
+#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
+#osd_memory_target: 4294967296
+#journal_size: 5120 # OSD journal size in MB
+#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
+#public_network: 0.0.0.0/0
+#cluster_network: "{{ public_network | regex_replace(' ', '') }}"
+#osd_mkfs_type: xfs
+#osd_mkfs_options_xfs: -f -i size=2048
+#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
+#osd_objectstore: bluestore
+
+# Any device containing these patterns in their path will be excluded.
+#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
+
+# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
+# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
+# be set to 'true' or 'false' to explicitly override those
+# defaults. Leave it 'null' to use the default for your chosen mkfs
+# type.
+#filestore_xattr_use_omap: null
+
+## MDS options
+#
+#mds_max_mds: 1
+
+## Rados Gateway options
+#
+#radosgw_frontend_type: beast # For additional frontends see: https://docs.ceph.com/en/pacific/radosgw/frontends/
+
+#radosgw_civetweb_port: 8080
+#radosgw_civetweb_num_threads: 512
+#radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }}"
+# For additional civetweb configuration options available such as logging,
+# keepalive, and timeout settings, please see the civetweb docs at
+# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
+
+#radosgw_frontend_port: "{{ radosgw_civetweb_port if radosgw_frontend_type == 'civetweb' else '8080' }}"
+# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format.
+#radosgw_frontend_ssl_certificate: ""
+#radosgw_frontend_ssl_certificate_data: "" # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate
+#radosgw_frontend_options: "{{ radosgw_civetweb_options if radosgw_frontend_type == 'civetweb' else '' }}"
+#radosgw_thread_pool_size: 512
+
+
+# You must define either radosgw_interface or radosgw_address.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# Eg. If you want to specify for each radosgw node which address the radosgw will bind to you can set it in your **inventory host file** by using 'radosgw_address' variable.
+# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
+#radosgw_interface: interface
+#radosgw_address: x.x.x.x
+#radosgw_address_block: subnet
+#radosgw_keystone_ssl: false # activate this when using keystone PKI keys
+#radosgw_num_instances: 1
+# Rados Gateway options
+#email_address: foo@bar.com
+
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+# if you don't want it keep the option commented
+#common_single_host_mode: true
+
+## Handlers - restarting daemons after a config change
+# if for whatever reasons the content of your ceph configuration changes
+# ceph daemons will be restarted as well. At the moment, we can not detect
+# which config option changed so all the daemons will be restarted. Although
+# this restart will be serialized for each node, in between a health check
+# will be performed so we make sure we don't move to the next node until
+# ceph is healthy again
+# Obviously between the checks (for monitors to be in quorum and for osd's pgs
+# to be clean) we have to wait. These retries and delays can be configurable
+# for both monitors and osds.
+#
+# Monitor handler checks
+#handler_health_mon_check_retries: 10
+#handler_health_mon_check_delay: 20
+#
+# OSD handler checks
+#handler_health_osd_check_retries: 40
+#handler_health_osd_check_delay: 30
+#handler_health_osd_check: true
+#
+# MDS handler checks
+#handler_health_mds_check_retries: 5
+#handler_health_mds_check_delay: 10
+#
+# RGW handler checks
+#handler_health_rgw_check_retries: 5
+#handler_health_rgw_check_delay: 10
+
+# NFS handler checks
+#handler_health_nfs_check_retries: 5
+#handler_health_nfs_check_delay: 10
+
+# RBD MIRROR handler checks
+#handler_health_rbd_mirror_check_retries: 5
+#handler_health_rbd_mirror_check_delay: 10
+
+# MGR handler checks
+#handler_health_mgr_check_retries: 5
+#handler_health_mgr_check_delay: 10
+
+## health mon/osds check retries/delay:
+
+#health_mon_check_retries: 20
+#health_mon_check_delay: 10
+#health_osd_check_retries: 20
+#health_osd_check_delay: 10
+
+##############
+# RBD-MIRROR #
+##############
+
+#ceph_rbd_mirror_pool: "rbd"
+
+###############
+# NFS-GANESHA #
+###############
+#
+# Access type options
+#
+# Enable NFS File access
+# If set to true, then ganesha is set up to export the root of the
+# Ceph filesystem, and ganesha's attribute and directory caching is disabled
+# as much as possible since libcephfs clients also cache the same
+# information.
+#
+# Set this to true to enable File access via NFS. Requires an MDS role.
+#nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
+
+
+#############
+# MULTISITE #
+#############
+
+# Changing this value allows multisite code to run
+#rgw_multisite: false
+
+# If the desired multisite configuration involves only one realm, one zone group and one zone (per cluster), then the multisite variables can be set here.
+# Please see README-MULTISITE.md for more information.
+#
+# If multiple realms or multiple zonegroups or multiple zones need to be created on a cluster, then
+# the multisite config variables should be edited in their respective zone .yaml file and realm .yaml file.
+# See README-MULTISITE-MULTIREALM.md for more information.
+
+# The following Multi-site related variables should be set by the user.
+#
+# rgw_zone is set to "default" to enable compression for clusters configured without rgw multi-site
+# If multisite is configured, rgw_zone should not be set to "default".
+#
+#rgw_zone: default
+
+#rgw_zonemaster: true
+#rgw_zonesecondary: false
+#rgw_zonegroup: solarsystem # should be set by the user
+#rgw_zonegroupmaster: true
+#rgw_zone_user: zone.user
+#rgw_zone_user_display_name: "Zone User"
+#rgw_realm: milkyway # should be set by the user
+#rgw_multisite_proto: "http"
+#system_access_key: 6kWkikvapSnHyE22P7nO # should be re-created by the user
+#system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt # should be re-created by the user
+
+# Multi-site remote pull URL variables
+#rgw_pull_port: "{{ radosgw_frontend_port }}"
+#rgw_pull_proto: "http" # should be the same as rgw_multisite_proto for the master zone cluster
+#rgw_pullhost: localhost # rgw_pullhost only needs to be declared if there is a zone secondary.
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ceph configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+#
+# When configuring RGWs, make sure you use the form [client.rgw.*]
+# instead of [client.radosgw.*].
+# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible.
+#
+# The following sections are supported: [global], [mon], [osd], [mds], [client]
+#
+# Example:
+# ceph_conf_overrides:
+# global:
+# foo: 1234
+# bar: 5678
+# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
+# rgw_zone: zone1
+#
+#ceph_conf_overrides: {}
+
+
+#############
+# OS TUNING #
+#############
+
+#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
+#os_tuning_params:
+# - { name: fs.file-max, value: 26234859 }
+# - { name: vm.zone_reclaim_mode, value: 0 }
+# - { name: vm.swappiness, value: 10 }
+# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
+
+# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES
+# Set this to a byte value (e.g. 134217728)
+# A value of 0 will leave the package default.
+#ceph_tcmalloc_max_total_thread_cache: 134217728
+
+
+##########
+# DOCKER #
+##########
+#ceph_docker_image: "ceph/daemon"
+#ceph_docker_image_tag: latest-pacific
+#ceph_docker_registry: quay.io
+#ceph_docker_registry_auth: false
+#ceph_docker_registry_username:
+#ceph_docker_registry_password:
+#ceph_docker_http_proxy:
+#ceph_docker_https_proxy:
+#ceph_docker_no_proxy: "localhost,127.0.0.1"
+## Client only docker image - defaults to {{ ceph_docker_image }}
+#ceph_client_docker_image: "{{ ceph_docker_image }}"
+#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
+#ceph_client_docker_registry: "{{ ceph_docker_registry }}"
+#containerized_deployment: False
+#container_binary:
+#timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
+
+
+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+#rolling_update: false
+
+#####################
+# Docker pull retry #
+#####################
+#docker_pull_retry: 3
+#docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+#openstack_config: false
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# openstack_glance_pool:
+# name: "images"
+# rule_name: "my_replicated_rule"
+# application: "rbd"
+# pg_autoscale_mode: False
+# pg_num: 16
+# pgp_num: 16
+# target_size_ratio: 0.2
+#openstack_glance_pool:
+# name: "images"
+# application: "rbd"
+#openstack_cinder_pool:
+# name: "volumes"
+# application: "rbd"
+#openstack_nova_pool:
+# name: "vms"
+# application: "rbd"
+#openstack_cinder_backup_pool:
+# name: "backups"
+# application: "rbd"
+#openstack_gnocchi_pool:
+# name: "metrics"
+# application: "rbd"
+#openstack_cephfs_data_pool:
+# name: "manila_data"
+# application: "cephfs"
+#openstack_cephfs_metadata_pool:
+# name: "manila_metadata"
+# application: "cephfs"
+#openstack_pools:
+# - "{{ openstack_glance_pool }}"
+# - "{{ openstack_cinder_pool }}"
+# - "{{ openstack_nova_pool }}"
+# - "{{ openstack_cinder_backup_pool }}"
+# - "{{ openstack_gnocchi_pool }}"
+# - "{{ openstack_cephfs_data_pool }}"
+# - "{{ openstack_cephfs_metadata_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+#openstack_keys:
+# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+
+
+#############
+# DASHBOARD #
+#############
+#dashboard_enabled: True
+# Choose http or https
+# For https, you should set dashboard.crt/key and grafana.crt/key
+# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
+# then we will autogenerate a cert and keyfile
+#dashboard_protocol: https
+#dashboard_port: 8443
+# set this variable to the network you want the dashboard to listen on. (Defaults to public_network)
+#dashboard_network: "{{ public_network }}"
+#dashboard_admin_user: admin
+#dashboard_admin_user_ro: false
+# This variable must be set with a strong custom password when dashboard_enabled is True
+#dashboard_admin_password: p@ssw0rd
+# We only need this for SSL (https) connections
+#dashboard_crt: ''
+#dashboard_key: ''
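+# Example (illustrative only; the paths below are placeholders, point them at your own certificate and key files):
+# dashboard_crt: '/etc/ceph/ceph-dashboard.crt'
+# dashboard_key: '/etc/ceph/ceph-dashboard.key'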
+#dashboard_certificate_cn: ceph-dashboard
+#dashboard_tls_external: false
+#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
+#dashboard_rgw_api_user_id: ceph-dashboard
+#dashboard_rgw_api_admin_resource: ''
+#dashboard_rgw_api_no_ssl_verify: False
+#dashboard_frontend_vip: ''
+#dashboard_disabled_features: []
+#prometheus_frontend_vip: ''
+#alertmanager_frontend_vip: ''
+#node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0"
+#node_exporter_port: 9100
+#grafana_admin_user: admin
+# This variable must be set with a strong custom password when dashboard_enabled is True
+#grafana_admin_password: admin
+# We only need this for SSL (https) connections
+#grafana_crt: ''
+#grafana_key: ''
+# When using https, please fill with a hostname for which grafana_crt is valid.
+#grafana_server_fqdn: ''
+#grafana_container_image: "docker.io/grafana/grafana:6.7.4"
+#grafana_container_cpu_period: 100000
+#grafana_container_cpu_cores: 2
+# container_memory is in GB
+#grafana_container_memory: 4
+#grafana_uid: 472
+#grafana_datasource: Dashboard
+#grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard"
+#grafana_dashboard_version: pacific
+#grafana_dashboard_files:
+# - ceph-cluster.json
+# - cephfs-overview.json
+# - host-details.json
+# - hosts-overview.json
+# - osd-device-details.json
+# - osds-overview.json
+# - pool-detail.json
+# - pool-overview.json
+# - radosgw-detail.json
+# - radosgw-overview.json
+# - radosgw-sync-overview.json
+# - rbd-details.json
+# - rbd-overview.json
+#grafana_plugins:
+# - vonage-status-panel
+# - grafana-piechart-panel
+#grafana_allow_embedding: True
+#grafana_port: 3000
+#grafana_network: "{{ public_network }}"
+#grafana_conf_overrides: {}
+#prometheus_container_image: "docker.io/prom/prometheus:v2.7.2"
+#prometheus_container_cpu_period: 100000
+#prometheus_container_cpu_cores: 2
+# container_memory is in GB
+#prometheus_container_memory: 4
+#prometheus_data_dir: /var/lib/prometheus
+#prometheus_conf_dir: /etc/prometheus
+#prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image
+#prometheus_port: 9092
+#prometheus_conf_overrides: {}
+# Uncomment this variable if you need to customize the retention period for prometheus storage.
+# Set it to '30d' if you want to retain 30 days of data.
+#prometheus_storage_tsdb_retention_time: 15d
+#alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
+#alertmanager_container_cpu_period: 100000
+#alertmanager_container_cpu_cores: 2
+# container_memory is in GB
+#alertmanager_container_memory: 4
+#alertmanager_data_dir: /var/lib/alertmanager
+#alertmanager_conf_dir: /etc/alertmanager
+#alertmanager_port: 9093
+#alertmanager_cluster_port: 9094
+#alertmanager_conf_overrides: {}
+#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"
+# igw
+#
+# The `igw_network` variable allows dashboard deployment when the iSCSI nodes do not reside in the same subnet as the one defined in `public_network`.
+# For example:
+# If the ceph public network is 2a00:8a60:1:c301::/64 and the iSCSI gateway resides
+# on a dedicated gateway network (2a00:8a60:1:c300::/64), with routing between those networks,
+# then "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}" will be empty.
+# As a consequence, this prevents deploying the dashboard with an iSCSI node that resides in a subnet different from `public_network`.
+# Setting `igw_network` to the subnet used by your iSCSI nodes makes it possible.
+#igw_network: "{{ public_network }}"
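+# Example (reusing the dedicated gateway subnet from the scenario described above):
+# igw_network: "2a00:8a60:1:c300::/64"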
+
+
+#################################
+# DEPRECATED iSCSI TARGET SETUP #
+#################################
+
+# WARNING #
+
+# The following values are deprecated. To set up targets, gateways, LUNs, and
+# clients you should use gwcli or dashboard. If the following values are set,
+# the old ceph-iscsi-config/ceph-iscsi-cli packages will be used.
+
+# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
+# client sees the gateway group as a single storage subsystem.
+#gateway_iqn: ""
+
+# gateway_ip_list provides a list of the IP addresses - one per gateway - that will be used
+# as an iscsi target portal ip. The list must be comma separated - and the order determines
+# the sequence of TPGs within the iscsi target across each gateway. Once set, additional
+# gateways can be added, but the order must *not* be changed.
+#gateway_ip_list: 0.0.0.0
+
+# rbd_devices defines the images that should be created and exported from the iscsi gateways.
+# If the rbd does not exist, it will be created for you. In addition you may increase the
+# size of rbd's by changing the size parameter and rerunning the playbook. A size value lower
+# than the current size of the rbd is ignored.
+#
+# the 'host' parameter defines which of the gateway nodes should handle the physical
+# allocation/expansion or removal of the rbd
+# to remove an image, simply use a state of 'absent'. This will first check that the rbd is not allocated
+# to any client, then remove it from LIO and finally delete the rbd image
+#
+# NB. this variable definition can be commented out to bypass LUN management
+#
+# Example:
+#
+#rbd_devices:
+# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
+#rbd_devices: {}
+
+# client_connections defines the client ACLs used to restrict client access to specific LUNs
+# The settings are as follows:
+# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
+# - chap supplies the user and password the client will use for authentication of the
+# form <user>/<password>
+# - status shows the intended state of this client definition - 'present' or 'absent'
+#
+# NB. this definition can be commented out to skip client (nodeACL) management
+#
+# Example:
+#
+#client_connections:
+# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
+# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
+
+#client_connections: {}
+
+#no_log_on_ceph_key_tasks: True
+
+###############
+# DEPRECATION #
+###############
+
+
+
+######################################################
+# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
+# *DO NOT* MODIFY THEM #
+######################################################
+
+#container_exec_cmd:
+#docker: false
+#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+###########
+# GENERAL #
+###########
+
+# Even though Client nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on Client nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+#user_config: false
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# test:
+# name: "test"
+# application: "rbd"
+# target_size_ratio: 0.2
+#test:
+# name: "test"
+# application: "rbd"
+#test2:
+# name: "test2"
+# application: "rbd"
+#pools:
+# - "{{ test }}"
+# - "{{ test2 }}"
+
+# Generate a keyring using ceph-authtool CLI or python.
+# Eg:
+# $ ceph-authtool --gen-print-key
+# or
+# $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print(base64.b64encode(header + key))"
+#
+# To use a particular secret, you have to add 'key' to the dict below, so something like:
+# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
+
+#keys:
+# - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
+# - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+# Whether or not to generate secure certificate to iSCSI gateway nodes
+#generate_crt: False
+
+#iscsi_conf_overrides: {}
+#iscsi_pool_name: rbd
+#iscsi_pool_size: 3
+
+#copy_admin_key: True
+
+##################
+# RBD-TARGET-API #
+##################
+# Optional settings related to the CLI/API service
+#api_user: admin
+#api_password: admin
+#api_port: 5000
+#api_secure: false
+#loop_delay: 1
+# set the variable below with a comma separated list of IPs
+# in order to restrict the access to the iSCSI API
+# trusted_ip_list: 192.168.122.1
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
+
+# TCMU_RUNNER resource limitation
+#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_tcmu_runner_docker_cpu_limit: 1
+
+# RBD_TARGET_GW resource limitation
+#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_rbd_target_gw_docker_cpu_limit: 1
+
+# RBD_TARGET_API resource limitation
+#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_rbd_target_api_docker_cpu_limit: 1
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though MDS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
+#ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_mds_docker_cpu_limit: 4
+
+# we currently force MDS_NAME to the hostname because of a bug in ceph-docker
+# fix here: https://github.com/ceph/ceph-docker/pull/770
+# this will go away soon.
+#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_mds_systemd_overrides will override the systemd settings
+# for the ceph-mds services.
+# For example,to set "PrivateDevices=false" you can specify:
+#ceph_mds_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+##########
+# GLOBAL #
+##########
+# Even though MGR nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on MGR nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+#mgr_secret: 'mgr_secret'
+
+
+###########
+# MODULES #
+###########
+# Ceph mgr modules to enable, to view the list of available modules see: http://docs.ceph.com/docs/CEPH_VERSION/mgr/
+# and replace CEPH_VERSION with your current Ceph version, e.g. 'mimic'
+#ceph_mgr_modules: []
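+# Example (illustrative only; module availability depends on your Ceph release):
+# ceph_mgr_modules:
+#   - status
+#   - prometheus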
+
+############
+# PACKAGES #
+############
+# Ceph mgr packages to install, ceph-mgr + extra module packages.
+#ceph_mgr_packages:
+# - ceph-mgr
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
+#ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_mgr_docker_cpu_limit: 1
+
+#ceph_mgr_docker_extra_env:
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_mgr_systemd_overrides will override the systemd settings
+# for the ceph-mgr services.
+# For example,to set "PrivateDevices=false" you can specify:
+#ceph_mgr_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+#mon_group_name: mons
+
+# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
+#monitor_secret: "{{ monitor_keyring.stdout }}"
+#admin_secret: 'admin_secret'
+
+# Secure your cluster
+# This will set the following flags on all the pools:
+# * nosizechange
+# * nopgchange
+# * nodelete
+
+#secure_cluster: false
+#secure_cluster_flags:
+# - nopgchange
+# - nodelete
+# - nosizechange
+
+#client_admin_ceph_authtool_cap:
+# mon: allow *
+# osd: allow *
+# mds: allow *
+# mgr: allow *
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
+#ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_mon_docker_cpu_limit: 1
+#ceph_mon_container_listen_port: 3300
+
+# Use this variable to add extra env configuration to run your mon container.
+# If you want to set a custom admin keyring you can set this variable like following:
+# ceph_mon_docker_extra_env: -e ADMIN_SECRET={{ admin_secret }}
+#ceph_mon_docker_extra_env:
+#mon_docker_privileged: false
+#mon_docker_net_host: true
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_mon_systemd_overrides will override the systemd settings
+# for the ceph-mon services.
+# For example,to set "PrivateDevices=false" you can specify:
+#ceph_mon_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though NFS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on NFS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+# Whether docker container or systemd service should be enabled
+# and started, it's useful to set it to false if nfs-ganesha
+# service is managed by pacemaker
+#ceph_nfs_enable_service: true
+
+# ceph-nfs systemd service uses ansible's hostname as an instance id,
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
+# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
+# such case it's better to have constant instance id instead which
+# can be set by 'ceph_nfs_service_suffix'
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
+
+######################
+# NFS Ganesha Config #
+######################
+#ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
+#ceph_nfs_dynamic_exports: false
+# If set to true then rados is used to store ganesha exports
+# and client sessions information, this is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover
+#ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLS
+#ceph_nfs_rados_export_index: "ganesha-export-index"
+# Address ganesha service should listen on, by default ganesha listens on all
+# addresses. (Note: ganesha ignores this parameter in current version due to
+# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
+# ceph_nfs_bind_addr: 0.0.0.0
+
+# If set to true, then ganesha's attribute and directory caching is disabled
+# as much as possible. Currently, ganesha caches by default.
+# When using ganesha as CephFS's gateway, it is recommended to turn off
+# ganesha's caching as the libcephfs clients also cache the same information.
+# Note: Irrespective of this option's setting, ganesha's caching is disabled
+# when setting 'nfs_file_gw' option as true.
+#ceph_nfs_disable_caching: false
+
+# This is the file ganesha will use to control NFSv4 ID mapping
+#ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf"
+
+# idmap configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# Example:
+# idmap_conf_overrides:
+# General:
+# Domain: foo.domain.net
+#idmap_conf_overrides: {}
+
+####################
+# FSAL Ceph Config #
+####################
+#ceph_nfs_ceph_export_id: 20133
+#ceph_nfs_ceph_pseudo_path: "/cephfile"
+#ceph_nfs_ceph_protocols: "3,4"
+#ceph_nfs_ceph_access_type: "RW"
+#ceph_nfs_ceph_user: "admin"
+#ceph_nfs_ceph_squash: "Root_Squash"
+#ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p"
+
+###################
+# FSAL RGW Config #
+###################
+#ceph_nfs_rgw_export_id: 20134
+#ceph_nfs_rgw_pseudo_path: "/cephobject"
+#ceph_nfs_rgw_protocols: "3,4"
+#ceph_nfs_rgw_access_type: "RW"
+#ceph_nfs_rgw_user: "cephnfs"
+#ceph_nfs_rgw_squash: "Root_Squash"
+#ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
+# Note: keys are optional and can be generated, but not on containerized deployments, where
+# they must be configured.
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and
+# must be in the correct ganesha.conf format seen here:
+# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
+#
+# Example:
+#CACHEINODE {
+# #Entries_HWMark = 100000;
+#}
+#
+#ganesha_core_param_overrides:
+#ganesha_ceph_export_overrides:
+#ganesha_rgw_export_overrides:
+#ganesha_rgw_section_overrides:
+#ganesha_log_overrides:
+#ganesha_conf_overrides: |
+# CACHEINODE {
+# #Entries_HWMark = 100000;
+# }
+
+##########
+# DOCKER #
+##########
+
+#ceph_docker_image: "ceph/daemon"
+#ceph_docker_image_tag: latest
+#ceph_nfs_docker_extra_env:
+#ceph_config_keys: [] # DON'T TOUCH ME
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+###########
+# GENERAL #
+###########
+
+# Even though OSD nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+
+##############
+# CEPH OPTIONS
+##############
+
+# Devices to be used as OSDs
+# You can pre-provision disks that are not present yet.
+# Ansible will just skip them. Newly added disks will be
+# automatically configured during the next run.
+#
+
+
+# Declare devices to be used as OSDs
+# All scenarios (except the 3rd) inherit from the following device declaration
+# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs
+
+#devices:
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+# - /dev/sde
+
+#devices: []
+
+# Declare devices to be used as block.db devices
+
+#dedicated_devices:
+# - /dev/sdx
+# - /dev/sdy
+
+#dedicated_devices: []
+
+# Declare devices to be used as block.wal devices
+
+#bluestore_wal_devices:
+# - /dev/nvme0n1
+# - /dev/nvme0n2
+
+#bluestore_wal_devices: []
+
+#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
+# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
+# which reports all the devices on a system. If chosen, all the disks
+# found will be passed to ceph-volume lvm batch. You should not worry about using
+# this option since ceph-volume has a built-in check which looks for empty devices.
+# Thus devices with existing partition tables will not be used.
+#
+#osd_auto_discovery: false
+
+# Encrypt your OSD device using dmcrypt
+# If set to True, no matter which osd_objectstore you use, the data will be encrypted
+#dmcrypt: False
+
+# Use ceph-volume to create OSDs from logical volumes.
+# lvm_volumes is a list of dictionaries.
+#
+# Filestore: Each dictionary must contain a data, journal and vg_name key. Any
+# logical volume or logical group used must be a name and not a path. data
+# can be a logical volume, device or partition. journal can be either a lv or partition.
+# You cannot use the same journal for many data lvs.
+# data_vg must be the volume group name of the data lv, only applicable when data is an lv.
+# journal_vg is optional and must be the volume group name of the journal lv, if applicable.
+# For example:
+# lvm_volumes:
+# - data: data-lv1
+# data_vg: vg1
+# journal: journal-lv1
+# journal_vg: vg2
+# crush_device_class: foo
+# - data: data-lv2
+# journal: /dev/sda1
+# data_vg: vg1
+# - data: data-lv3
+# journal: /dev/sdb1
+# data_vg: vg2
+# - data: /dev/sda
+# journal: /dev/sdb1
+# - data: /dev/sda1
+# journal: /dev/sdb1
+#
+# Bluestore: Each dictionary must contain at least data. When defining wal or
+# db, it must have both the lv name and vg group (db and wal are not required).
+# This allows for four combinations: just data, data and wal, data and wal and
+# db, data and db.
+# For example:
+# lvm_volumes:
+# - data: data-lv1
+# data_vg: vg1
+# wal: wal-lv1
+# wal_vg: vg1
+# crush_device_class: foo
+# - data: data-lv2
+# db: db-lv2
+# db_vg: vg2
+# - data: data-lv3
+# wal: wal-lv1
+# wal_vg: vg3
+# db: db-lv3
+# db_vg: vg3
+# - data: data-lv4
+# data_vg: vg4
+# - data: /dev/sda
+# - data: /dev/sdb1
+
+#lvm_volumes: []
+#crush_device_class: ""
+#osds_per_device: 1
+
+###############
+# CRUSH RULES #
+###############
+#crush_rule_config: false
+
+#crush_rule_hdd:
+# name: HDD
+# root: default
+# type: host
+# class: hdd
+# default: false
+
+#crush_rule_ssd:
+# name: SSD
+# root: default
+# type: host
+# class: ssd
+# default: false
+
+#crush_rules:
+# - "{{ crush_rule_hdd }}"
+# - "{{ crush_rule_ssd }}"
+
+# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
+# and will move hosts into them which might lead to significant data movement in the cluster!
+#
+# In order for the playbook to create CRUSH hierarchy, you have to setup your Ansible inventory file like so:
+#
+# [osds]
+# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
+#
+# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host)
+#create_crush_tree: false
+
+##########
+# DOCKER #
+##########
+
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
+#ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_osd_docker_cpu_limit: 4
+
+# The next two variables are undefined, and thus, unused by default.
+# If `lscpu | grep NUMA` returned the following:
+# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16
+# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17
+# then, the following would run the OSD on the first NUMA node only.
+#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+#ceph_osd_docker_cpuset_mems: "0"
+
+# PREPARE DEVICE
+#
+# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
+#
+#ceph_osd_docker_devices: "{{ devices }}"
+#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
+
+# ACTIVATE DEVICE
+#
+#ceph_osd_docker_extra_env:
+#ceph_osd_numactl_opts: ""
+
+###########
+# SYSTEMD #
+###########
+
+# ceph_osd_systemd_overrides will override the systemd settings
+# for the ceph-osd services.
+# For example,to set "PrivateDevices=false" you can specify:
+#ceph_osd_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
+
+###########
+#  CHECK  #
+###########
+
+#nb_retry_wait_osd_up: 60
+#delay_wait_osd_up: 10
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+#########
+# SETUP #
+#########
+
+# Even though rbd-mirror nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on rbd-mirror nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory. Only
+# valid for Luminous and later releases.
+#copy_admin_key: false
+
+
+#################
+# CONFIGURATION #
+#################
+
+#ceph_rbd_mirror_local_user: client.rbd-mirror-peer
+#ceph_rbd_mirror_configure: false
+#ceph_rbd_mirror_mode: pool
+#ceph_rbd_mirror_remote_cluster: remote
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
+#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+#ceph_rbd_mirror_docker_cpu_limit: 1
+
+#ceph_rbd_mirror_docker_extra_env:
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_rbd_mirror_systemd_overrides will override the systemd settings
+# for the ceph-rbd-mirror services.
+# For example,to set "PrivateDevices=false" you can specify:
+#ceph_rbd_mirror_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
--- /dev/null
+#rgw_realm: usa
+
+# the user should generate a new pair of keys for each realm
+
+#system_access_key: 6kWkikvapSnHyE22P7nO
+#system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt
+
+#rgw_realm_system_user: bostonian
+#rgw_realm_system_user_display_name: "Mark Wahlberg"
+
+# The variables rgw_pull_port, rgw_pull_proto and rgw_pullhost are what comprise one of the rgw endpoints in a master zone in the zonegroup and realm you want to create secondary zones in.
+
+#rgw_pull_port: "{{ radosgw_frontend_port }}"
+#rgw_pull_proto: "http"
+#rgw_pullhost: localhost
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+#haproxy_frontend_port: 80
+#haproxy_frontend_ssl_port: 443
+#haproxy_frontend_ssl_certificate:
+#haproxy_ssl_dh_param: 4096
+#haproxy_ssl_ciphers:
+# - EECDH+AESGCM
+# - EDH+AESGCM
+#haproxy_ssl_options:
+# - no-sslv3
+# - no-tlsv10
+# - no-tlsv11
+# - no-tls-tickets
+#
+#virtual_ips:
+# - 192.168.238.250
+# - 192.168.238.251
+#
+#virtual_ip_netmask: 24
+#virtual_ip_interface: ens33
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though RGW nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+##########
+# TUNING #
+##########
+
+# To support buckets with a very large number of objects it's
+# important to split them into shards. We suggest about 100K
+# objects per shard as a conservative maximum.
+#rgw_override_bucket_index_max_shards: 16
+
+# Consider setting a quota on buckets so that exceeding this
+# limit will require admin intervention.
+#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
+
+# Declaring rgw_create_pools will create pools with the given number of pgs,
+# size, and type. The following are some important notes on this automatic
+# pool creation:
+# - The pools and associated pg_num's below are merely examples of pools that
+# could be automatically created when rgws are deployed.
+# - The default pg_num is 8 (from osd_pool_default_pg_num) for pools created
+#   if rgw_create_pools isn't declared and configured.
+# - A pgcalc tool should be used to determine the optimal sizes for
+# the rgw.buckets.data, rgw.buckets.index pools as well as any other
+# pools declared in this dictionary.
+# https://ceph.io/pgcalc is the upstream pgcalc tool
+# https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
+# Red Hat if you are using RHCS.
+# - The default value of {{ rgw_zone }} is 'default'.
+# - The type must be set as either 'replicated' or 'ec' for
+# each pool.
+# - If a pool's type is 'ec', k and m values must be set via
+# the ec_k, and ec_m variables.
+# - The rule_name key can be used with a specific crush rule value (must exist).
+# If the key doesn't exist it falls back to the default replicated_rule.
+#   This only works for the replicated pool type, not erasure.
+
+#rgw_create_pools:
+# "{{ rgw_zone }}.rgw.buckets.data":
+# pg_num: 64
+# type: ec
+# ec_profile: myecprofile
+# ec_k: 5
+# ec_m: 3
+# "{{ rgw_zone }}.rgw.buckets.index":
+# pg_num: 16
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.meta":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.log":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.control":
+# pg_num: 8
+# size: 3
+# type: replicated
+# rule_name: foo
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_rgw_docker_extra_env' variable.
+#ceph_rgw_docker_memory_limit: "4096m"
+#ceph_rgw_docker_cpu_limit: 8
+#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+#ceph_rgw_docker_cpuset_mems: "0"
+
+#ceph_rgw_docker_extra_env:
+#ceph_config_keys: [] # DON'T TOUCH ME
+#rgw_config_keys: "/" # DON'T TOUCH ME
+
+###########
+# SYSTEMD #
+###########
+# ceph_rgw_systemd_overrides will override the systemd settings
+# for the ceph-rgw services.
+# For example,to set "PrivateDevices=false" you can specify:
+#ceph_rgw_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+######################################
+# Releases name to number dictionary #
+######################################
+#ceph_release_num:
+# dumpling: 0.67
+# emperor: 0.72
+# firefly: 0.80
+# giant: 0.87
+# hammer: 0.94
+# infernalis: 9
+# jewel: 10
+# kraken: 11
+# luminous: 12
+# mimic: 13
+# nautilus: 14
+# octopus: 15
+# pacific: 16
+# dev: 99
+
+
+# The 'cluster' variable determines the name of the cluster.
+# Changing the default value to something else means that you will
+# need to change all the command line calls as well, for example if
+# your cluster name is 'foo':
+# "ceph health" will become "ceph --cluster foo health"
+#
+# An easier way to handle this is to use the environment variable CEPH_ARGS.
+# So run: export CEPH_ARGS="--cluster foo"
+# With that you will be able to run "ceph health" normally
+#cluster: ceph
+
+# Inventory host group variables
+#mon_group_name: mons
+#osd_group_name: osds
+#rgw_group_name: rgws
+#mds_group_name: mdss
+#nfs_group_name: nfss
+#rbdmirror_group_name: rbdmirrors
+#client_group_name: clients
+#iscsi_gw_group_name: iscsigws
+#mgr_group_name: mgrs
+#rgwloadbalancer_group_name: rgwloadbalancers
+#monitoring_group_name: monitoring
+#adopt_label_group_names:
+# - "{{ mon_group_name }}"
+# - "{{ osd_group_name }}"
+# - "{{ rgw_group_name }}"
+# - "{{ mds_group_name }}"
+# - "{{ nfs_group_name }}"
+# - "{{ rbdmirror_group_name }}"
+# - "{{ client_group_name }}"
+# - "{{ iscsi_gw_group_name }}"
+# - "{{ mgr_group_name }}"
+# - "{{ rgwloadbalancer_group_name }}"
+# - "{{ monitoring_group_name }}"
+
+# If configure_firewall is true, then ansible will try to configure the
+# appropriate firewalling rules so that Ceph daemons can communicate
+# with each other.
+#configure_firewall: True
+
+# Open ports on corresponding nodes if firewall is installed on it
+#ceph_mon_firewall_zone: public
+#ceph_mgr_firewall_zone: public
+#ceph_osd_firewall_zone: public
+#ceph_rgw_firewall_zone: public
+#ceph_mds_firewall_zone: public
+#ceph_nfs_firewall_zone: public
+#ceph_rbdmirror_firewall_zone: public
+#ceph_iscsi_firewall_zone: public
+#ceph_dashboard_firewall_zone: public
+#ceph_rgwloadbalancer_firewall_zone: public
+
+# cephadm account for remote connections
+#cephadm_ssh_user: root
+#cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa"
+#cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub"
+#cephadm_mgmt_network: "{{ public_network }}"
+
+############
+# PACKAGES #
+############
+#debian_package_dependencies: []
+
+#centos_package_dependencies:
+# - epel-release
+# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
+
+#redhat_package_dependencies: []
+
+#suse_package_dependencies: []
+
+# Whether or not to install the ceph-test package.
+#ceph_test: false
+
+# Enable the ntp service by default to avoid clock skew on ceph nodes
+# Disable if an appropriate NTP client is already installed and configured
+#ntp_service_enabled: true
+
+# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
+#ntp_daemon_type: chronyd
+
+# This variable determines if ceph packages can be updated. If False, the
+# package resources will use "state=present". If True, they will use
+# "state=latest".
+#upgrade_ceph_packages: False
+
+#ceph_use_distro_backports: false # DEBIAN ONLY
+#ceph_directories_mode: "0755"
+
+###########
+# INSTALL #
+###########
+# ORIGIN SOURCE
+#
+# Choose between:
+# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'rhcs', 'dev', 'uca', 'custom' or 'obs'
+# - 'distro' means that no separate repo file will be added;
+#   you will get whatever version of Ceph is included in your Linux distro.
+# - 'local' means that the ceph binaries will be copied over from the local machine
+ceph_origin: repository
+#valid_ceph_origins:
+# - repository
+# - distro
+# - local
+
+
+ceph_repository: rhcs
+#valid_ceph_repository:
+# - community
+# - rhcs
+# - dev
+# - uca
+# - custom
+# - obs
+
+
+# REPOSITORY: COMMUNITY VERSION
+#
+# Enabled when ceph_repository == 'community'
+#
+#ceph_mirror: https://download.ceph.com
+#ceph_stable_key: https://download.ceph.com/keys/release.asc
+#ceph_stable_release: pacific
+#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
+
+#nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+#nfs_ganesha_stable_branch: V3.5-stable
+#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-3.0/ubuntu
+#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
+#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
+#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-3.0/ubuntu
+
+# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
+# for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
+# for more info read: https://github.com/ceph/ceph-ansible/issues/305
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+
+
+# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
+#
+# Enabled when ceph_repository == 'rhcs'
+#
+# This version is supported on RHEL 8
+#
+ceph_rhcs_version: 5
+
+
+# REPOSITORY: UBUNTU CLOUD ARCHIVE
+#
+# Enabled when ceph_repository == 'uca'
+#
+# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
+# usually has newer Ceph releases than the normal distro repository.
+#
+#
+#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+#ceph_stable_openstack_release_uca: queens
+#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
+
+# REPOSITORY: openSUSE OBS
+#
+# Enabled when ceph_repository == 'obs'
+#
+# This allows the install of Ceph from the openSUSE OBS repository. The OBS repository
+# usually has newer Ceph releases than the normal distro repository.
+#
+#
+#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
+
+# REPOSITORY: DEV
+#
+# Enabled when ceph_repository == 'dev'
+#
+#ceph_dev_branch: main # development branch you would like to use e.g. main, wip-hack
+#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
+
+#nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
+# flavors so far include: ceph_master, ceph_jewel, ceph_kraken, ceph_luminous
+#nfs_ganesha_flavor: "ceph_master"
+
+ceph_iscsi_config_dev: false
+
+
+# REPOSITORY: CUSTOM
+#
+# Enabled when ceph_repository == 'custom'
+#
+# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
+# a URL to the .repo file to be installed on the targets. For deb,
+# ceph_custom_repo should be the URL to the repo base.
+#
+#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
+
+
+# ORIGIN: LOCAL CEPH INSTALLATION
+#
+# Enabled when ceph_repository == 'local'
+#
+# Path to DESTDIR of the ceph install
+#ceph_installation_dir: "/path/to/ceph_installation/"
+# Whether or not to use installer script rundep_installer.sh
+# This script takes in rundep and installs the packages line by line onto the machine
+# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
+# all runtime dependencies installed
+#use_installer: false
+# Root directory for ceph-ansible
+#ansible_dir: "/path/to/ceph-ansible"
+
+
+######################
+# CEPH CONFIGURATION #
+######################
+
+## Ceph options
+#
+# Each cluster requires a unique, consistent filesystem ID. By
+# default, the playbook generates one for you.
+# If you want to customize how the fsid is
+# generated, you may find it useful to disable fsid generation to
+# avoid cluttering up your ansible repo. If you set `generate_fsid` to
+# false, you *must* generate `fsid` in another way.
+# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
+#fsid: "{{ cluster_uuid.stdout }}"
+#generate_fsid: true
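+# Example (illustrative only; the value below is a placeholder -- generate your own, e.g. with `uuidgen`):
+# generate_fsid: false
+# fsid: 5b7c9a46-2c4a-4136-9bf5-0f4d2e3a1c8d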
+
+#ceph_conf_key_directory: /etc/ceph
+
+#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
+
+# Permissions for keyring files in /etc/ceph
+#ceph_keyring_permissions: '0600'
+
+#cephx: true
+
+## Client options
+#
+#rbd_cache: "true"
+#rbd_cache_writethrough_until_flush: "true"
+#rbd_concurrent_management_ops: 20
+
+#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
+
+# Permissions for the rbd_client_log_path and
+# rbd_client_admin_socket_path. Depending on your use case for Ceph
+# you may want to change these values. The default, which is used if
+# any of the variables are unset or set to a false value (like `null`
+# or `false`) is to automatically determine what is appropriate for
+# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
+# for infernalis releases, and root:root and 1777 for pre-infernalis
+# releases.
+#
+# For other use cases, including running Ceph with OpenStack, you'll
+# want to set these differently:
+#
+# For OpenStack on RHEL, you'll want:
+# rbd_client_directory_owner: "qemu"
+# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
+# rbd_client_directory_mode: "0755"
+#
+# For OpenStack on Ubuntu or Debian, set:
+# rbd_client_directory_owner: "libvirt-qemu"
+# rbd_client_directory_group: "kvm"
+# rbd_client_directory_mode: "0755"
+#
+# If you set rbd_client_directory_mode, you must use a string (e.g.,
+# 'rbd_client_directory_mode: "0755"', *not*
+# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
+# must be in octal or symbolic form
+#rbd_client_directory_owner: ceph
+#rbd_client_directory_group: ceph
+#rbd_client_directory_mode: "0770"
+
+#rbd_client_log_path: /var/log/ceph
+#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
+#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
+
+## Monitor options
+#
+# You must define either monitor_interface, monitor_address or monitor_address_block.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# E.g. if you want to specify for each monitor which address the monitor will bind to, you can set it in your **inventory host file** by using the 'monitor_address' variable.
+# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
+#monitor_interface: interface
+#monitor_address: x.x.x.x
+#monitor_address_block: subnet
+# set to either ipv4 or ipv6, whichever your network is using
+#ip_version: ipv4
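+# Example inventory host file entries (illustrative only; hostnames and addresses are placeholders):
+# [mons]
+# ceph-mon-01 monitor_address=192.168.1.10
+# ceph-mon-02 monitor_address=192.168.1.11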
+
+#mon_host_v1:
+# enabled: True
+# suffix: ':6789'
+#mon_host_v2:
+# suffix: ':3300'
+
+#enable_ceph_volume_debug: False
+
+##########
+# CEPHFS #
+##########
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+# target_size_ratio: 0.2
+#cephfs: cephfs # name of the ceph filesystem
+#cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+#cephfs_metadata_pool:
+# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+#cephfs_pools:
+# - "{{ cephfs_data_pool }}"
+# - "{{ cephfs_metadata_pool }}"
+
+## OSD options
+#
+#lvmetad_disabled: false
+#is_hci: false
+#hci_safety_factor: 0.2
+#non_hci_safety_factor: 0.7
+#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
+#osd_memory_target: 4294967296
+#journal_size: 5120 # OSD journal size in MB
+#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
+#public_network: 0.0.0.0/0
+#cluster_network: "{{ public_network | regex_replace(' ', '') }}"
+#osd_mkfs_type: xfs
+#osd_mkfs_options_xfs: -f -i size=2048
+#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
+#osd_objectstore: bluestore
+
+# Any device containing these patterns in their path will be excluded.
+#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
+
+# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
+# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
+# be set to 'true' or 'false' to explicitly override those
+# defaults. Leave it 'null' to use the default for your chosen mkfs
+# type.
+#filestore_xattr_use_omap: null
+
+## MDS options
+#
+#mds_max_mds: 1
+
+## Rados Gateway options
+#
+#radosgw_frontend_type: beast # For additional frontends see: https://docs.ceph.com/en/pacific/radosgw/frontends/
+
+#radosgw_civetweb_port: 8080
+#radosgw_civetweb_num_threads: 512
+#radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }}"
+# For additional civetweb configuration options available such as logging,
+# keepalive, and timeout settings, please see the civetweb docs at
+# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
+
+#radosgw_frontend_port: "{{ radosgw_civetweb_port if radosgw_frontend_type == 'civetweb' else '8080' }}"
+# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format.
+#radosgw_frontend_ssl_certificate: ""
+#radosgw_frontend_ssl_certificate_data: "" # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate
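+# Example (illustrative only; file names and paths are placeholders):
+# build the PEM bundle with something like: cat server.key server.crt ca.crt > /etc/ceph/radosgw.pem
+# radosgw_frontend_ssl_certificate: /etc/ceph/radosgw.pem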
+#radosgw_frontend_options: "{{ radosgw_civetweb_options if radosgw_frontend_type == 'civetweb' else '' }}"
+#radosgw_thread_pool_size: 512
+
+
+# You must define either radosgw_interface or radosgw_address.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# E.g. if you want to specify for each radosgw node which address the radosgw will bind to, you can set it in your **inventory host file** by using the 'radosgw_address' variable.
+# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
+#radosgw_interface: interface
+#radosgw_address: x.x.x.x
+#radosgw_address_block: subnet
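+# Example inventory host file entry (illustrative only; hostname and address are placeholders):
+# [rgws]
+# ceph-rgw-01 radosgw_address=192.168.1.20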
+#radosgw_keystone_ssl: false # activate this when using keystone PKI keys
+#radosgw_num_instances: 1
+# Rados Gateway options
+#email_address: foo@bar.com
+
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+# if you don't want it keep the option commented
+#common_single_host_mode: true
+
+## Handlers - restarting daemons after a config change
+# if for whatever reason the content of your ceph configuration changes
+# ceph daemons will be restarted as well. At the moment, we can not detect
+# which config option changed so all the daemons will be restarted. Although
+# this restart will be serialized for each node, in between a health check
+# will be performed so we make sure we don't move to the next node until
+# ceph is healthy again
+# Obviously between the checks (for monitors to be in quorum and for the osds' pgs
+# to be clean) we have to wait. These retries and delays can be configured
+# for both monitors and osds.
+#
+# Monitor handler checks
+#handler_health_mon_check_retries: 10
+#handler_health_mon_check_delay: 20
+#
+# OSD handler checks
+#handler_health_osd_check_retries: 40
+#handler_health_osd_check_delay: 30
+#handler_health_osd_check: true
+#
+# MDS handler checks
+#handler_health_mds_check_retries: 5
+#handler_health_mds_check_delay: 10
+#
+# RGW handler checks
+#handler_health_rgw_check_retries: 5
+#handler_health_rgw_check_delay: 10
+
+# NFS handler checks
+#handler_health_nfs_check_retries: 5
+#handler_health_nfs_check_delay: 10
+
+# RBD MIRROR handler checks
+#handler_health_rbd_mirror_check_retries: 5
+#handler_health_rbd_mirror_check_delay: 10
+
+# MGR handler checks
+#handler_health_mgr_check_retries: 5
+#handler_health_mgr_check_delay: 10
+
+## health mon/osds check retries/delay:
+
+#health_mon_check_retries: 20
+#health_mon_check_delay: 10
+#health_osd_check_retries: 20
+#health_osd_check_delay: 10
+
+##############
+# RBD-MIRROR #
+##############
+
+#ceph_rbd_mirror_pool: "rbd"
+
+###############
+# NFS-GANESHA #
+###############
+#
+# Access type options
+#
+# Enable NFS File access
+# If set to true, then ganesha is set up to export the root of the
+# Ceph filesystem, and ganesha's attribute and directory caching is disabled
+# as much as possible since libcephfs clients also cache the same
+# information.
+#
+# Set this to true to enable File access via NFS. Requires an MDS role.
+#nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
+
+
+#############
+# MULTISITE #
+#############
+
+# Changing this value allows multisite code to run
+#rgw_multisite: false
+
+# If the desired multisite configuration involves only one realm, one zone group and one zone (per cluster), then the multisite variables can be set here.
+# Please see README-MULTISITE.md for more information.
+#
+# If multiple realms or multiple zonegroups or multiple zones need to be created on a cluster, then
+# the multisite config variables should be edited in their respective zone .yaml file and realm .yaml file.
+# See README-MULTISITE-MULTIREALM.md for more information.
+
+# The following Multi-site related variables should be set by the user.
+#
+# rgw_zone is set to "default" to enable compression for clusters configured without rgw multi-site
+# If multisite is configured, rgw_zone should not be set to "default".
+#
+#rgw_zone: default
+
+#rgw_zonemaster: true
+#rgw_zonesecondary: false
+#rgw_zonegroup: solarsystem # should be set by the user
+#rgw_zonegroupmaster: true
+#rgw_zone_user: zone.user
+#rgw_zone_user_display_name: "Zone User"
+#rgw_realm: milkyway # should be set by the user
+#rgw_multisite_proto: "http"
+#system_access_key: 6kWkikvapSnHyE22P7nO # should be re-created by the user
+#system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt # should be re-created by the user
+
+# Multi-site remote pull URL variables
+#rgw_pull_port: "{{ radosgw_frontend_port }}"
+#rgw_pull_proto: "http" # should be the same as rgw_multisite_proto for the master zone cluster
+#rgw_pullhost: localhost # rgw_pullhost only needs to be declared if there is a zone secondary.
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ceph configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+#
+# When configuring RGWs, make sure you use the form [client.rgw.*]
+# instead of [client.radosgw.*].
+# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible.
+#
+# The following sections are supported: [global], [mon], [osd], [mds], [client]
+#
+# Example:
+# ceph_conf_overrides:
+# global:
+# foo: 1234
+# bar: 5678
+# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
+# rgw_zone: zone1
+#
+#ceph_conf_overrides: {}
+
+
+#############
+# OS TUNING #
+#############
+
+#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
+#os_tuning_params:
+# - { name: fs.file-max, value: 26234859 }
+# - { name: vm.zone_reclaim_mode, value: 0 }
+# - { name: vm.swappiness, value: 10 }
+# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
+
+# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES
+# Set this to a byte value (e.g. 134217728)
+# A value of 0 will leave the package default.
+#ceph_tcmalloc_max_total_thread_cache: 134217728
+
+
+##########
+# DOCKER #
+##########
+ceph_docker_image: "rhceph/rhceph-5-rhel8"
+ceph_docker_image_tag: "latest"
+ceph_docker_registry: "registry.redhat.io"
+ceph_docker_registry_auth: true
+#ceph_docker_registry_username:
+#ceph_docker_registry_password:
+#ceph_docker_http_proxy:
+#ceph_docker_https_proxy:
+#ceph_docker_no_proxy: "localhost,127.0.0.1"
+## Client only docker image - defaults to {{ ceph_docker_image }}
+#ceph_client_docker_image: "{{ ceph_docker_image }}"
+#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
+#ceph_client_docker_registry: "{{ ceph_docker_registry }}"
+containerized_deployment: true
+#container_binary:
+#timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
+
+
+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+#rolling_update: false
+
+#####################
+# Docker pull retry #
+#####################
+#docker_pull_retry: 3
+#docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+#openstack_config: false
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# openstack_glance_pool:
+# name: "images"
+# rule_name: "my_replicated_rule"
+# application: "rbd"
+# pg_autoscale_mode: False
+# pg_num: 16
+# pgp_num: 16
+# target_size_ratio: 0.2
+#openstack_glance_pool:
+# name: "images"
+# application: "rbd"
+#openstack_cinder_pool:
+# name: "volumes"
+# application: "rbd"
+#openstack_nova_pool:
+# name: "vms"
+# application: "rbd"
+#openstack_cinder_backup_pool:
+# name: "backups"
+# application: "rbd"
+#openstack_gnocchi_pool:
+# name: "metrics"
+# application: "rbd"
+#openstack_cephfs_data_pool:
+# name: "manila_data"
+# application: "cephfs"
+#openstack_cephfs_metadata_pool:
+# name: "manila_metadata"
+# application: "cephfs"
+#openstack_pools:
+# - "{{ openstack_glance_pool }}"
+# - "{{ openstack_cinder_pool }}"
+# - "{{ openstack_nova_pool }}"
+# - "{{ openstack_cinder_backup_pool }}"
+# - "{{ openstack_gnocchi_pool }}"
+# - "{{ openstack_cephfs_data_pool }}"
+# - "{{ openstack_cephfs_metadata_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+#openstack_keys:
+# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+
+
+#############
+# DASHBOARD #
+#############
+#dashboard_enabled: True
+# Choose http or https
+# For https, you should set dashboard.crt/key and grafana.crt/key
+# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
+# then we will autogenerate a cert and keyfile
+#dashboard_protocol: https
+#dashboard_port: 8443
+# Set this variable to the network you want the dashboard to listen on (defaults to public_network).
+#dashboard_network: "{{ public_network }}"
+#dashboard_admin_user: admin
+#dashboard_admin_user_ro: false
+# This variable must be set with a strong custom password when dashboard_enabled is True
+#dashboard_admin_password: p@ssw0rd
+# We only need this for SSL (https) connections
+#dashboard_crt: ''
+#dashboard_key: ''
+#dashboard_certificate_cn: ceph-dashboard
+#dashboard_tls_external: false
+#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
+#dashboard_rgw_api_user_id: ceph-dashboard
+#dashboard_rgw_api_admin_resource: ''
+#dashboard_rgw_api_no_ssl_verify: False
+#dashboard_frontend_vip: ''
+#dashboard_disabled_features: []
+#prometheus_frontend_vip: ''
+#alertmanager_frontend_vip: ''
+node_exporter_container_image: registry.redhat.io/openshift4/ose-prometheus-node-exporter:v4.6
+#node_exporter_port: 9100
+#grafana_admin_user: admin
+# This variable must be set with a strong custom password when dashboard_enabled is True
+#grafana_admin_password: admin
+# We only need this for SSL (https) connections
+#grafana_crt: ''
+#grafana_key: ''
+# When using https, set this to a hostname for which grafana_crt is valid.
+#grafana_server_fqdn: ''
+grafana_container_image: registry.redhat.io/rhceph/rhceph-5-dashboard-rhel8:5
+#grafana_container_cpu_period: 100000
+#grafana_container_cpu_cores: 2
+# container_memory is in GB
+#grafana_container_memory: 4
+#grafana_uid: 472
+#grafana_datasource: Dashboard
+#grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard"
+#grafana_dashboard_version: pacific
+#grafana_dashboard_files:
+# - ceph-cluster.json
+# - cephfs-overview.json
+# - host-details.json
+# - hosts-overview.json
+# - osd-device-details.json
+# - osds-overview.json
+# - pool-detail.json
+# - pool-overview.json
+# - radosgw-detail.json
+# - radosgw-overview.json
+# - radosgw-sync-overview.json
+# - rbd-details.json
+# - rbd-overview.json
+#grafana_plugins:
+# - vonage-status-panel
+# - grafana-piechart-panel
+#grafana_allow_embedding: True
+#grafana_port: 3000
+#grafana_network: "{{ public_network }}"
+#grafana_conf_overrides: {}
+prometheus_container_image: registry.redhat.io/openshift4/ose-prometheus:v4.6
+#prometheus_container_cpu_period: 100000
+#prometheus_container_cpu_cores: 2
+# container_memory is in GB
+#prometheus_container_memory: 4
+#prometheus_data_dir: /var/lib/prometheus
+#prometheus_conf_dir: /etc/prometheus
+#prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image
+#prometheus_port: 9092
+#prometheus_conf_overrides: {}
+# Uncomment this variable if you need to customize the retention period for prometheus storage.
+# Set it to '30d' if you want to retain 30 days of data.
+#prometheus_storage_tsdb_retention_time: 15d
+alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alertmanager:v4.6
+#alertmanager_container_cpu_period: 100000
+#alertmanager_container_cpu_cores: 2
+# container_memory is in GB
+#alertmanager_container_memory: 4
+#alertmanager_data_dir: /var/lib/alertmanager
+#alertmanager_conf_dir: /etc/alertmanager
+#alertmanager_port: 9093
+#alertmanager_cluster_port: 9094
+#alertmanager_conf_overrides: {}
+#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"
+# igw
+#
+# The `igw_network` variable allows deploying the dashboard when an iSCSI node does not reside in the same subnet as the one defined in `public_network`.
+# For example:
+# If the ceph public network is 2a00:8a60:1:c301::/64 and the iSCSI gateway resides
+# in a dedicated gateway network (2a00:8a60:1:c300::/64), with routing between those networks,
+# then "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}" will be empty.
+# As a consequence, this prevents deploying the dashboard with an iSCSI node that resides in a subnet different from `public_network`.
+# Using `igw_network` makes it possible; set it to the subnet used by your iSCSI node.
+#igw_network: "{{ public_network }}"
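+# For the example scenario above, this could be set to the dedicated gateway subnet:
+# igw_network: "2a00:8a60:1:c300::/64"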
+
+
+#################################
+# DEPRECATED iSCSI TARGET SETUP #
+#################################
+
+# WARNING #
+
+# The following values are deprecated. To set up targets, gateways, LUNs, and
+# clients you should use gwcli or dashboard. If the following values are set,
+# the old ceph-iscsi-config/ceph-iscsi-cli packages will be used.
+
+# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
+# client sees the gateway group as a single storage subsystem.
+#gateway_iqn: ""
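+# A purely illustrative placeholder (choose your own IQN), e.g.:
+# gateway_iqn: "iqn.2003-01.com.example.iscsi-gw:ceph-igw"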
+
+# gateway_ip_list provides a list of the IP addresses - one per gateway - that will be used
+# as an iscsi target portal ip. The list must be comma separated - and the order determines
+# the sequence of TPGs within the iscsi target across each gateway. Once set, additional
+# gateways can be added, but the order must *not* be changed.
+#gateway_ip_list: 0.0.0.0
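+# A purely illustrative two-gateway example (addresses are placeholders):
+# gateway_ip_list: 192.168.122.101,192.168.122.102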
+
+# rbd_devices defines the images that should be created and exported from the iscsi gateways.
+# If the rbd does not exist, it will be created for you. In addition, you may increase the
+# size of RBDs by changing the size parameter and rerunning the playbook. A size value lower
+# than the current size of the rbd is ignored.
+#
+# the 'host' parameter defines which of the gateway nodes should handle the physical
+# allocation/expansion or removal of the rbd
+# to remove an image, simply use a state of 'absent'. This will first check that the rbd is not allocated
+# to any client, then remove it from LIO and delete the rbd image
+#
+# NB. this variable definition can be commented out to bypass LUN management
+#
+# Example:
+#
+#rbd_devices:
+# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
+#rbd_devices: {}
+
+# client_connections defines the client ACL's to restrict client access to specific LUNs
+# The settings are as follows:
+# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
+# - chap supplies the user and password the client will use for authentication of the
+# form <user>/<password>
+# - status shows the intended state of this client definition - 'present' or 'absent'
+#
+# NB. this definition can be commented out to skip client (nodeACL) management
+#
+# Example:
+#
+#client_connections:
+# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
+# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
+
+#client_connections: {}
+
+#no_log_on_ceph_key_tasks: True
+
+###############
+# DEPRECATION #
+###############
+
+
+
+######################################################
+# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
+# *DO NOT* MODIFY THEM #
+######################################################
+
+#container_exec_cmd:
+#docker: false
+#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
+
--- /dev/null
+#rgw_zone: boston
+
+# Both rgw_zonemaster and rgw_zonesecondary must be set and they cannot have the same value
+
+#rgw_zonemaster: true
+#rgw_zonesecondary: false
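+#
+# For a secondary zone the two values would be inverted, for example:
+# rgw_zonemaster: false
+# rgw_zonesecondary: true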
+
+#rgw_zonegroup: massachusetts
+
+# The variable rgw_zonegroupmaster specifies whether the zonegroup will be the master zonegroup in a realm. There can only be one master zonegroup per realm.
+
+#rgw_zonegroupmaster: true
--- /dev/null
+Infrastructure playbooks
+========================
+
+This directory contains a variety of playbooks that can be used independently of the Ceph roles we have.
+They aim to perform infrastructure-related tasks that help with managing a Ceph cluster or performing certain operational tasks.
+
+To use them, run `ansible-playbook infrastructure-playbooks/<playbook>`.
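+
+For example, the backup-and-restore playbook documented in this directory could be run against an inventory file named `hosts` like so (the backup directory, mode and node name below are illustrative):
+
+`ansible-playbook -i hosts infrastructure-playbooks/backup-and-restore-ceph-files.yml -e backup_dir=/tmp/ceph-backup -e mode=backup -e target_node=mon01`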
--- /dev/null
+---
+# This playbook is used to add a new MON to
+# an existing cluster. It can be run from any machine. Even if the fetch
+# directory is not present, it will be created.
+#
+# Ensure that all monitors are present in the mons
+# group in your inventory so that the ceph configuration file
+# is created correctly for the new MON(s).
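+#
+# A typical invocation (assuming this playbook lives in infrastructure-playbooks/
+# and is named add-mon.yml, and the new monitor is already in the mons group):
+#
+#   ansible-playbook -i <inventory> infrastructure-playbooks/add-mon.yml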
+- hosts: mons
+ gather_facts: false
+ vars:
+ delegate_facts_host: true
+ become: true
+ pre_tasks:
+ - import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
+
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+ - import_role:
+ name: ceph-defaults
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ with_items: "{{ groups[mon_group_name] }}"
+ run_once: true
+ when: delegate_facts_host | bool
+ tasks:
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-validate
+ - import_role:
+ name: ceph-infra
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-engine
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+
+- hosts: mons
+ gather_facts: false
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-mon
+ - import_role:
+ name: ceph-crash
+ when: containerized_deployment | bool
+
+# update config files on OSD nodes
+- hosts: osds
+ gather_facts: true
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-config
--- /dev/null
+---
+# Copyright Red Hat
+# SPDX-License-Identifier: Apache-2.0
+#
+# This playbook can be used to back up some Ceph files and restore them later.
+#
+# Usage:
+#
+# ansible-playbook -i <inventory> backup-and-restore-ceph-files.yml -e backup_dir=<backup directory path> -e mode=<backup|restore> -e target_node=<inventory_name>
+#
+# Required run-time variables
+# ---------------------------
+# backup_dir : the path where files will be written to (backup) or read from (restore).
+# mode : tells the playbook whether to back up or restore files.
+# target_node : the name of the node being processed; it must match the name set in the inventory.
+#
+# Examples
+# --------
+# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=backup -e target_node=mon01
+# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=restore -e target_node=mon01
+
+- hosts: localhost
+ become: true
+ gather_facts: true
+ tasks:
+ - name: exit playbook, if user did not set the source node
+ fail:
+ msg: >
+ "You must pass the node name: -e target_node=<inventory_name>.
+ The name must match what is set in your inventory."
+ when:
+ - target_node is not defined
+ or target_node not in groups.get('all', [])
+
+ - name: exit playbook, if user did not set the backup directory
+ fail:
+ msg: >
+ "you must pass the backup directory path: -e backup_dir=<backup directory path>"
+ when: backup_dir is not defined
+
+ - name: exit playbook, if user did not set the playbook mode (backup|restore)
+ fail:
+ msg: >
+ "you must pass the mode: -e mode=<backup|restore>"
+ when:
+ - mode is not defined
+ or mode not in ['backup', 'restore']
+
+ - name: gather facts on source node
+ setup:
+ delegate_to: "{{ target_node }}"
+ delegate_facts: true
+
+ - name: backup mode
+ when: mode == 'backup'
+ block:
+ - name: create a temp directory
+ ansible.builtin.tempfile:
+ state: directory
+ suffix: ansible-archive-ceph
+ register: tmp_dir
+ delegate_to: "{{ target_node }}"
+
+ - name: archive files
+ archive:
+ path: "{{ item }}"
+ dest: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
+ format: tar
+ delegate_to: "{{ target_node }}"
+ loop:
+ - /etc/ceph
+ - /var/lib/ceph
+
+ - name: create backup directory
+ become: false
+ file:
+ path: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}"
+ state: directory
+
+ - name: backup files
+ fetch:
+ src: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
+ dest: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
+ flat: yes
+ loop:
+ - /etc/ceph
+ - /var/lib/ceph
+ delegate_to: "{{ target_node }}"
+
+ - name: remove temp directory
+ file:
+ path: "{{ tmp_dir.path }}"
+ state: absent
+ delegate_to: "{{ target_node }}"
+
+ - name: restore mode
+ when: mode == 'restore'
+ block:
+ - name: unarchive files
+ ansible.builtin.unarchive:
+ src: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
+ dest: "{{ item | dirname }}"
+ loop:
+ - /etc/ceph
+ - /var/lib/ceph
+ delegate_to: "{{ target_node }}"
--- /dev/null
+---
+# This playbook is used to manage CephX keys.
+# You will find examples below on how the module can be used for daily operations.
+#
+# It currently runs on localhost
+
+- hosts: localhost
+ gather_facts: false
+ vars:
+ cluster: ceph
+ container_exec_cmd: "docker exec ceph-nano"
+ keys_to_info:
+ - client.admin
+ - mds.0
+ keys_to_delete:
+ - client.leseb
+ - client.leseb1
+ - client.pythonnnn
+ keys_to_create:
+ - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" }
+ - { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
+ - { name: client.path, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
+
+ tasks:
+ - name: create ceph key(s) module
+ ceph_key:
+ name: "{{ item.name }}"
+ caps: "{{ item.caps }}"
+ cluster: "{{ cluster }}"
+ secret: "{{ item.key | default('') }}"
+ containerized: "{{ container_exec_cmd | default(False) }}"
+ with_items: "{{ keys_to_create }}"
+
+ - name: update ceph key(s)
+ ceph_key:
+ name: "{{ item.name }}"
+ state: update
+ caps: "{{ item.caps }}"
+ cluster: "{{ cluster }}"
+ containerized: "{{ container_exec_cmd | default(False) }}"
+ with_items: "{{ keys_to_create }}"
+
+ - name: delete ceph key(s)
+ ceph_key:
+ name: "{{ item }}"
+ state: absent
+ cluster: "{{ cluster }}"
+ containerized: "{{ container_exec_cmd | default(False) }}"
+ with_items: "{{ keys_to_delete }}"
+
+ - name: info ceph key(s)
+ ceph_key:
+ name: "{{ item }}"
+ state: info
+ cluster: "{{ cluster }}"
+ containerized: "{{ container_exec_cmd }}"
+ register: key_info
+ ignore_errors: true
+ with_items: "{{ keys_to_info }}"
+
+ - name: list ceph key(s)
+ ceph_key:
+ state: list
+ cluster: "{{ cluster }}"
+ containerized: "{{ container_exec_cmd | default(False) }}"
+ register: list_keys
+ ignore_errors: true
+
+ - name: fetch_initial_keys
+ ceph_key:
+ state: fetch_initial_keys
+ cluster: "{{ cluster }}"
+ ignore_errors: true
--- /dev/null
+---
+#
+# This playbook does a cephadm adopt for all the Ceph services
+#
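+# A typical invocation (assuming this playbook lives in infrastructure-playbooks/
+# and is named cephadm-adopt.yml):
+#
+#   ansible-playbook -i <inventory> infrastructure-playbooks/cephadm-adopt.yml -e ireallymeanit=yes
+#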
+
+- name: confirm whether user really meant to adopt the cluster by cephadm
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ vars_prompt:
+ - name: ireallymeanit
+    prompt: Are you sure you want to adopt the cluster by cephadm?
+ default: 'no'
+ private: no
+ tasks:
+ - name: exit playbook, if user did not mean to adopt the cluster by cephadm
+ fail:
+ msg: >
+ Exiting cephadm-adopt playbook, cluster was NOT adopted.
+ To adopt the cluster, either say 'yes' on the prompt or
+ use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook
+ when: ireallymeanit != 'yes'
+
+ - name: import_role ceph-defaults
+ import_role:
+ name: ceph-defaults
+
+ - name: check if a legacy grafana-server group exists
+ import_role:
+ name: ceph-facts
+ tasks_from: convert_grafana_server_group_name.yml
+ when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
+
+- name: gather facts and prepare system for cephadm
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ - "{{ monitoring_group_name|default('monitoring') }}"
+ become: true
+ any_errors_fatal: True
+ gather_facts: false
+ vars:
+ delegate_facts_host: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
+ run_once: true
+ when: delegate_facts_host | bool
+
+ - name: fail if one osd node is using filestore
+ fail:
+ msg: >
+ filestore OSDs are not supported with cephadm.
+ Please convert them with the filestore-to-bluestore.yml playbook first.
+ when:
+ - osd_group_name in group_names
+ - osd_objectstore == 'filestore'
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: set_fact ceph_cmd
+ set_fact:
+ ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:ro -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}"
+
+ - name: check pools have an application enabled
+ command: "{{ ceph_cmd }} health detail --format json"
+ register: health_detail
+ run_once: true
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: check for POOL_APP_NOT_ENABLED warning
+ fail:
+ msg: "Make sure all your pool have an application enabled."
+ run_once: true
+ delegate_to: localhost
+ when:
+ - (health_detail.stdout | default('{}', True) | from_json)['status'] == "HEALTH_WARN"
+ - "'POOL_APP_NOT_ENABLED' in (health_detail.stdout | default('{}', True) | from_json)['checks']"
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: convert_grafana_server_group_name.yml
+ when: groups.get((grafana_server_group_name|default('grafana-server')), []) | length > 0
+
+ - name: get the ceph version
+ command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version"
+ changed_when: false
+ register: ceph_version_out
+
+ - name: set_fact ceph_version
+ set_fact:
+ ceph_version: "{{ ceph_version_out.stdout.split(' ')[2] }}"
+
+ - name: fail on pre octopus ceph releases
+ fail:
+ msg: >
+ Your Ceph version {{ ceph_version }} is not supported for this operation.
+ Please upgrade your cluster with the rolling_update.yml playbook first.
+ when: ceph_version is version('15.2', '<')
+
+ - name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+
+ - name: set_fact is_atomic
+ set_fact:
+ is_atomic: "{{ stat_ostree.stat.exists }}"
+
+ - import_role:
+ name: ceph-container-engine
+ when: not containerized_deployment | bool
+
+ - import_role:
+ name: ceph-container-common
+ tasks_from: registry.yml
+ when:
+ - not containerized_deployment | bool
+ - ceph_docker_registry_auth | bool
+
+ - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
+ command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ register: docker_image
+ until: docker_image.rc == 0
+ retries: "{{ docker_pull_retry }}"
+ delay: 10
+ when:
+ - not containerized_deployment | bool
+ - inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(iscsi_gw_group_name, []) or
+ inventory_hostname in groups.get(nfs_group_name, [])
+
+ - name: configure repository for installing cephadm
+ when: containerized_deployment | bool
+ tags: with_pkg
+ block:
+ - name: set_fact ceph_origin
+ set_fact:
+ ceph_origin: repository
+ when: ceph_origin == 'dummy'
+
+ - name: set_fact ceph_repository
+ set_fact:
+ ceph_repository: community
+ when: ceph_repository == 'dummy'
+
+ - name: validate repository variables
+ import_role:
+ name: ceph-validate
+ tasks_from: check_repository.yml
+
+ - name: configure repository
+ import_role:
+ name: ceph-common
+ tasks_from: "configure_repository.yml"
+
+ - name: install cephadm requirements
+ tags: with_pkg
+ package:
+ name: ['python3', 'lvm2']
+ register: result
+ until: result is succeeded
+
+ - name: install cephadm
+ tags: with_pkg
+ package:
+ name: cephadm
+ register: result
+ until: result is succeeded
+
+ - name: install cephadm mgr module
+ tags: with_pkg
+ package:
+ name: ceph-mgr-cephadm
+ register: result
+ until: result is succeeded
+ when:
+ - not containerized_deployment | bool
+ - mgr_group_name in group_names
+
+ - name: get current fsid
+ command: "{{ ceph_cmd }} fsid"
+ register: current_fsid
+ run_once: true
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: get a minimal ceph configuration
+ command: "{{ ceph_cmd }} config generate-minimal-conf"
+ register: minimal_config
+ run_once: true
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: set_fact fsid
+ set_fact:
+ fsid: "{{ current_fsid.stdout }}"
+ run_once: true
+
+ - name: enable cephadm mgr module
+ ceph_mgr_module:
+ name: cephadm
+ cluster: "{{ cluster }}"
+ state: enable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: set cephadm as orchestrator backend
+ command: "{{ ceph_cmd }} orch set backend cephadm"
+ changed_when: false
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
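+    # cephadm needs an SSH keypair to reach the managed hosts: the tasks below reuse
+    # an existing keypair when both the private and public parts are present, fail if
+    # only one of them exists, and otherwise let cephadm generate a new one.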
+ - name: check if there is an existing ssh keypair
+ stat:
+ path: "{{ item }}"
+ loop:
+ - "{{ cephadm_ssh_priv_key_path }}"
+ - "{{ cephadm_ssh_pub_key_path }}"
+ register: ssh_keys
+ changed_when: false
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: set fact
+ set_fact:
+ stat_ssh_key_pair: "{{ ssh_keys.results | map(attribute='stat.exists') | list }}"
+
+ - name: fail if either ssh public or private key is missing
+ fail:
+ msg: "One part of the ssh keypair of user {{ cephadm_ssh_user }} is missing"
+ when:
+ - false in stat_ssh_key_pair
+ - true in stat_ssh_key_pair
+
+ - name: generate cephadm ssh key if there is none
+ command: "{{ ceph_cmd }} cephadm generate-key"
+ when: not true in stat_ssh_key_pair
+ changed_when: false
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: use existing user keypair for remote connections
+ when: not false in stat_ssh_key_pair
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ command: >
+ {{ container_binary + ' run --rm --net=host --security-opt label=disable
+ -v /etc/ceph:/etc/ceph:z
+ -v /var/lib/ceph:/var/lib/ceph:ro
+ -v /var/run/ceph:/var/run/ceph:z
+ -v ' + item.1 + ':/etc/ceph/cephadm.' + item.0 + ':ro --entrypoint=ceph '+ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}
+ --cluster {{ cluster }} cephadm set-{{ item.0 }}-key -i /etc/ceph/cephadm.{{ item.0 }}
+ with_together:
+ - [ 'pub', 'priv' ]
+ - [ '{{ cephadm_ssh_pub_key_path }}', '{{ cephadm_ssh_priv_key_path }}' ]
+
+ - name: get the cephadm ssh pub key
+ command: "{{ ceph_cmd }} cephadm get-pub-key"
+ changed_when: false
+ run_once: true
+ register: cephadm_pubpkey
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: allow cephadm key for {{ cephadm_ssh_user }} account
+ authorized_key:
+ user: "{{ cephadm_ssh_user }}"
+ key: '{{ cephadm_pubpkey.stdout }}'
+
+ - name: set cephadm ssh user to {{ cephadm_ssh_user }}
+ command: "{{ ceph_cmd }} cephadm set-user {{ cephadm_ssh_user }}"
+ changed_when: false
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: run cephadm prepare-host
+ command: cephadm prepare-host
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set default container image in ceph configuration
+ command: "{{ ceph_cmd }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: set container image base in ceph configuration
+ command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
+ changed_when: false
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: set dashboard container image in ceph mgr configuration
+ when: dashboard_enabled | bool
+ run_once: true
+ block:
+ - name: set alertmanager container image in ceph configuration
+ command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: set grafana container image in ceph configuration
+ command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: set node-exporter container image in ceph configuration
+ command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: set prometheus container image in ceph configuration
+ command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+
+ - name: enable the osd memory autotune for hci environment
+ command: "{{ ceph_cmd }} config set osd osd_memory_target_autotune true"
+ changed_when: false
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ when: is_hci | bool
+
+ - name: set autotune_memory_target_ratio
+ command: "{{ ceph_cmd }} config set mgr mgr/cephadm/autotune_memory_target_ratio {{ '0.2' if is_hci | bool else '0.7' }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: manage nodes with cephadm - ipv4
+ command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | first }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ when: ip_version == 'ipv4'
+
+ - name: manage nodes with cephadm - ipv6
+ command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | last | ipwrap }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ when: ip_version == 'ipv6'
+
+ - name: add ceph label for core component
+ command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['nodename'] }} ceph"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ when: inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+ - name: get the client.admin keyring
+ ceph_key:
+ name: client.admin
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ register: client_admin_keyring
+
+ - name: copy the client.admin keyring
+ copy:
+ dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
+ content: "{{ client_admin_keyring.stdout + '\n' }}"
+ owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ run_once: true
+ delegate_to: "{{ item }}"
+ with_items:
+ - "{{ groups.get(osd_group_name, []) }}"
+ - "{{ groups.get(mds_group_name, []) }}"
+ - "{{ groups.get(rgw_group_name, []) }}"
+ - "{{ groups.get(mgr_group_name, []) }}"
+ - "{{ groups.get(rbdmirror_group_name, []) }}"
+
+ - name: assimilate ceph configuration
+ command: "{{ ceph_cmd }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf"
+ changed_when: false
+ when: inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+ - name: set_fact cephadm_cmd
+ set_fact:
+ cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}"
+
+ - name: set container registry info
+ command: "{{ ceph_cmd }} cephadm registry-login {{ ceph_docker_registry }} {{ ceph_docker_registry_username }} {{ ceph_docker_registry_password }}"
+ changed_when: false
+ no_log: true
+ run_once: true
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ when: ceph_docker_registry_auth | bool
+
+ - name: remove logrotate configuration
+ file:
+ path: /etc/logrotate.d/ceph
+ state: absent
+ when: inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+
+- name: store existing rbd mirror peers in monitor config store
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ become: true
+ any_errors_fatal: true
+ gather_facts: true
+ tasks:
+ - name: store existing rbd mirror peers in monitor config store
+ when: ceph_rbd_mirror_configure | default(False) | bool
+ block:
+ - name: import ceph-defaults
+ import_role:
+ name: ceph-defaults
+
+ - name: import ceph-validate
+ import_role:
+ name: ceph-validate
+ tasks_from: check_rbdmirror.yml
+
+ - name: import container_binary
+ import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: set_fact rbd_cmd
+ set_fact:
+ rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }} -n client.rbd-mirror.{{ ansible_facts['hostname'] }} -k /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring"
+
+ - name: set_fact admin_rbd_cmd
+ set_fact:
+ admin_rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}"
+ - name: get mirror pool info
+ command: "{{ rbd_cmd }} mirror pool info {{ ceph_rbd_mirror_pool }} --format json"
+ register: mirror_pool_info
+ changed_when: false
+
+ - name: set_fact mirror_peer_found
+ set_fact:
+ mirror_peer_uuid: "{{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^'+ceph_rbd_mirror_remote_cluster+'$') | map(attribute='uuid') | list) }}"
+
+ - name: remove current rbd mirror peer, add new peer into mon config store
+ when: mirror_peer_uuid | length > 0
+ block:
+ - name: get remote user keyring
+ slurp:
+ src: "/etc/ceph/{{ ceph_rbd_mirror_remote_cluster }}.{{ ceph_rbd_mirror_remote_user }}.keyring"
+ register: remote_user_keyring
+
+ - name: get quorum_status
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ register: quorum_status
+ run_once: true
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set_fact mon_ip_list
+ set_fact:
+ mon_ip_list: "{{ mon_ip_list | default([]) | union([item['addr'].split(':')[0]]) }}"
+ loop: "{{ (quorum_status.stdout | default('{}') | from_json)['monmap']['mons'] }}"
+ run_once: true
+
+ - name: remove current mirror peer
+ command: "{{ admin_rbd_cmd }} mirror pool peer remove {{ ceph_rbd_mirror_pool }} {{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^'+ceph_rbd_mirror_remote_cluster+'$') | map(attribute='uuid') | list)[0] }}"
+ delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
+ changed_when: false
+
+ - name: get remote user keyring secret
+ set_fact:
+ remote_user_keyring_secret: "{{ item.split('=', 1)[1] | trim }}"
+ with_items: "{{ (remote_user_keyring.content | b64decode).split('\n') }}"
+ when: "'key = ' in item"
+
+ - name: create a temporary file
+ tempfile:
+ path: /etc/ceph
+ state: file
+ suffix: _ceph-ansible
+ register: tmp_file
+ delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
+
+ - name: write secret to temporary file
+ copy:
+ dest: "{{ tmp_file.path }}"
+ content: "{{ remote_user_keyring_secret }}"
+ delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
+
+ - name: re-add mirror peer
+ command: "{{ admin_rbd_cmd }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ','.join(mon_ip_list) }} --remote-key-file {{ tmp_file.path }}"
+ delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
+ changed_when: false
+
+ - name: rm temporary file
+ file:
+ path: "{{ tmp_file.path }}"
+ state: absent
+ delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
+
+
+- name: adopt ceph mon daemons
+ hosts: "{{ mon_group_name|default('mons') }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: adopt mon daemon
+ cephadm_adopt:
+ name: "mon.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ docker: "{{ true if container_binary == 'docker' else false }}"
+ pull: false
+ firewalld: "{{ true if configure_firewall | bool else false }}"
+
+ - name: reset failed ceph-mon systemd unit
+ command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa 303
+ changed_when: false
+ failed_when: false
+ when: containerized_deployment | bool
+
+ - name: remove ceph-mon systemd files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-mon@.service
+ - /etc/systemd/system/ceph-mon@.service.d
+ - /etc/systemd/system/ceph-mon.target
+
+ - name: waiting for the monitor to join the quorum...
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json"
+ changed_when: false
+ register: ceph_health_raw
+ until: >
+ ansible_facts['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
+ retries: "{{ health_mon_check_retries }}"
+ delay: "{{ health_mon_check_delay }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: adopt ceph mgr daemons
+ hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: adopt mgr daemon
+ cephadm_adopt:
+ name: "mgr.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ docker: "{{ true if container_binary == 'docker' else false }}"
+ pull: false
+ firewalld: "{{ true if configure_firewall | bool else false }}"
+
+ - name: reset failed ceph-mgr systemd unit
+ command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa 303
+ changed_when: false
+ failed_when: false
+ when: containerized_deployment | bool
+
+ - name: remove ceph-mgr systemd files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-mgr@.service
+ - /etc/systemd/system/ceph-mgr@.service.d
+ - /etc/systemd/system/ceph-mgr.target
+
+
+- name: stop and remove legacy iscsigw daemons
+ hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: stop and disable iscsigw systemd services
+ service:
+ name: '{{ item }}'
+ state: stopped
+ enabled: false
+ failed_when: false
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
+ - tcmu-runner
+
+ - name: reset failed iscsigw systemd units
+ command: 'systemctl reset-failed {{ item }}' # noqa 303
+ changed_when: false
+ failed_when: false
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
+ - tcmu-runner
+ when: containerized_deployment | bool
+
+ - name: remove iscsigw systemd unit files
+ file:
+ path: '/etc/systemd/system/{{ item }}.service'
+ state: absent
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
+ - tcmu-runner
+ when: containerized_deployment | bool
+
+
+- name: redeploy iscsigw daemons
+ hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: update the placement of iscsigw hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply iscsi {{ iscsi_pool_name | default('rbd') }} {{ api_user | default('admin') }} {{ api_password | default('admin') }} {{ trusted_ip_list | default('192.168.122.1') }} --placement='{{ groups.get(iscsi_gw_group_name, []) | length }} label:{{ iscsi_gw_group_name }}'"
+ run_once: true
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+
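+# The following plays quiesce data movement while OSD daemons are adopted: the balancer
+# and pg autoscaler are disabled and the noout/nodeep-scrub flags are set before
+# adoption, then restored once all OSDs have been adopted and PGs are clean again.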
+- name: set osd flags
+ hosts: "{{ osd_group_name|default('osds') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: get pool list
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ register: pool_list
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ check_mode: false
+
+ - name: get balancer module status
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ register: balancer_status_adopt
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ check_mode: false
+
+ - name: set_fact pools_pgautoscaler_mode
+ set_fact:
+ pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
+ run_once: true
+ with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
+
+ - name: disable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: (balancer_status_adopt.stdout | from_json)['active'] | bool
+
+ - name: disable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: false
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: set osd flags
+ ceph_osd_flag:
+ cluster: "{{ cluster }}"
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - noout
+ - nodeep-scrub
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: adopt ceph osd daemons
+ hosts: "{{ osd_group_name|default('osd') }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+ when: containerized_deployment | bool
+
+ - name: get osd list
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: list
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: osd_list
+
+ - name: set osd fsid for containerized deployment
+ lineinfile:
+ path: '/var/lib/ceph/osd/{{ cluster }}-{{ item.key }}/fsid'
+ line: "{{ (item.value | selectattr('type', 'equalto', 'block') | map(attribute='tags') | first)['ceph.osd_fsid'] }}"
+ owner: '{{ ceph_uid }}'
+ group: '{{ ceph_uid }}'
+ create: true
+ with_dict: '{{ osd_list.stdout | from_json }}'
+ when: containerized_deployment | bool
+
+ - name: set osd type for containerized deployment
+ lineinfile:
+ path: '/var/lib/ceph/osd/{{ cluster }}-{{ item }}/type'
+ line: 'bluestore'
+ owner: '{{ ceph_uid }}'
+ group: '{{ ceph_uid }}'
+ create: true
+ loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
+ when: containerized_deployment | bool
+
+ - name: adopt osd daemon
+ cephadm_adopt:
+ name: "osd.{{ item }}"
+ cluster: "{{ cluster }}"
+ image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ docker: "{{ true if container_binary == 'docker' else false }}"
+ pull: false
+ firewalld: "{{ true if configure_firewall | bool else false }}"
+ loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
+
+ - name: remove ceph-osd systemd and ceph-osd-run.sh files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-osd@.service
+ - /etc/systemd/system/ceph-osd@.service.d
+ - /etc/systemd/system/ceph-osd.target
+ - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
+
+ - name: remove osd directory
+ file:
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
+ state: absent
+ loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
+
+ - name: remove any legacy directories in /var/lib/ceph/mon (workaround)
+ file:
+ path: "/var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}"
+ state: absent
+
+ - name: waiting for clean pgs...
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph pg stat --format json"
+ changed_when: false
+ register: ceph_health_post
+ until: >
+ (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
+ and
+ (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | selectattr('name', 'search', '^active\\+clean') | map(attribute='num') | list | sum) == (ceph_health_post.stdout | from_json).pg_summary.num_pgs)
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ retries: "{{ health_osd_check_retries }}"
+ delay: "{{ health_osd_check_delay }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: unset osd flags
+ hosts: "{{ osd_group_name|default('osds') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: re-enable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: true
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: unset osd flags
+ ceph_osd_flag:
+ cluster: "{{ cluster }}"
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - noout
+ - nodeep-scrub
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: re-enable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: (balancer_status_adopt.stdout | from_json)['active'] | bool
+
+- name: redeploy mds daemons
+ hosts: "{{ mds_group_name|default('mdss') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: update the placement of metadata hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mds {{ cephfs }} --placement='{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'"
+ run_once: true
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: stop and remove legacy ceph mds daemons
+ hosts: "{{ mds_group_name|default('mdss') }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: stop and disable ceph-mds systemd service
+ service:
+ name: "ceph-mds@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: stop and disable ceph-mds systemd target
+ service:
+ name: ceph-mds.target
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: reset failed ceph-mds systemd unit
+ command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa 303
+ changed_when: false
+ failed_when: false
+ when: containerized_deployment | bool
+
+ - name: remove ceph-mds systemd files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-mds@.service
+ - /etc/systemd/system/ceph-mds@.service.d
+ - /etc/systemd/system/ceph-mds.target
+
+ - name: remove legacy ceph mds data
+ file:
+ path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}"
+ state: absent
+
+- name: redeploy rgw daemons
+ hosts: "{{ rgw_group_name | default('rgws') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: set_radosgw_address.yml
+
+ - name: import rgw ssl certificate into kv store
+ when: radosgw_frontend_ssl_certificate | length > 0
+ block:
+ - name: slurp rgw ssl certificate
+ slurp:
+ src: "{{ radosgw_frontend_ssl_certificate }}"
+ register: rgw_ssl_cert
+
+ - name: store ssl certificate in kv store (not multisite)
+ command: >
+ {{ container_binary }} run --rm -i -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }}
+ config-key set rgw/cert/rgw.{{ ansible_facts['hostname'] }} -i -
+ args:
+ stdin: "{{ rgw_ssl_cert.content | b64decode }}"
+ stdin_add_newline: no
+ changed_when: false
+ when: not rgw_multisite | bool
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: store ssl certificate in kv store (multisite)
+ command: >
+ {{ container_binary }} run --rm -i -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }}
+ config-key set rgw/cert/rgw.{{ ansible_facts['hostname'] }}.{{ item.rgw_realm }}.{{ item.rgw_zone }}.{{ item.radosgw_frontend_port }} -i -
+ args:
+ stdin: "{{ rgw_ssl_cert.content | b64decode }}"
+ stdin_add_newline: no
+ changed_when: false
+ loop: "{{ rgw_instances }}"
+ when: rgw_multisite | bool
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of radosgw hosts
+ command: >
+ {{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} --
+ ceph orch apply rgw {{ ansible_facts['hostname'] }}
+ --placement='count-per-host:{{ radosgw_num_instances }} {{ ansible_facts['nodename'] }}'
+ --port={{ radosgw_frontend_port }}
+ {{ '--ssl' if radosgw_frontend_ssl_certificate else '' }}
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: not rgw_multisite | bool
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of radosgw multisite hosts
+ command: >
+ {{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} --
+ ceph orch apply rgw {{ ansible_facts['hostname'] }}.{{ item.rgw_realm }}.{{ item.rgw_zone }}.{{ item.radosgw_frontend_port }}
+ --placement={{ ansible_facts['nodename'] }}
+ --realm={{ item.rgw_realm }} --zone={{ item.rgw_zone }}
+ --port={{ item.radosgw_frontend_port }}
+ {{ '--ssl' if radosgw_frontend_ssl_certificate else '' }}
+ changed_when: false
+ loop: "{{ rgw_instances }}"
+ when: rgw_multisite | bool
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: stop and remove legacy ceph rgw daemons
+ hosts: "{{ rgw_group_name|default('rgws') }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: set_radosgw_address.yml
+
+ - name: stop and disable ceph-radosgw systemd service
+ service:
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+ state: stopped
+ enabled: false
+ failed_when: false
+ loop: '{{ rgw_instances }}'
+
+ - name: stop and disable ceph-radosgw systemd target
+ service:
+ name: ceph-radosgw.target
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: reset failed ceph-radosgw systemd unit
+ command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa 303
+ changed_when: false
+ failed_when: false
+ loop: '{{ rgw_instances }}'
+ when: containerized_deployment | bool
+
+ - name: remove ceph-radosgw systemd files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-radosgw@.service
+ - /etc/systemd/system/ceph-radosgw@.service.d
+ - /etc/systemd/system/ceph-radosgw.target
+
+ - name: remove legacy ceph radosgw data
+ file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+ state: absent
+ loop: '{{ rgw_instances }}'
+
+ - name: remove legacy ceph radosgw directory
+ file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
+ state: absent
+
+- name: stop and remove legacy ceph nfs daemons
+ hosts: "{{ nfs_group_name|default('nfss') }}"
+ tags: 'ceph_nfs_adopt'
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-nfs
+ tasks_from: create_rgw_nfs_user.yml
+
+ - name: enable ceph mgr nfs module
+ ceph_mgr_module:
+ name: "nfs"
+ cluster: "{{ cluster }}"
+ state: enable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: stop and disable ceph-nfs systemd service
+ service:
+ name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: reset failed ceph-nfs systemd unit
+ command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa 303
+ changed_when: false
+ failed_when: false
+ when: containerized_deployment | bool
+
+ - name: remove ceph-nfs systemd unit files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-nfs@.service
+ - /etc/systemd/system/ceph-nfs@.service.d
+
+ - name: remove legacy ceph radosgw directory
+ file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
+ state: absent
+
+ - name: create nfs ganesha cluster
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: create cephfs export
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+ when: nfs_file_gw | bool
+
+ - name: create rgw export
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+ when: nfs_obj_gw | bool
+
+- name: redeploy rbd-mirror daemons
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: update the placement of rbd-mirror hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply rbd-mirror --placement='{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'"
+ run_once: true
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: stop and remove legacy rbd-mirror daemons
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: stop and disable rbd-mirror systemd service
+ service:
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: stop and disable rbd-mirror systemd target
+ service:
+ name: ceph-rbd-mirror.target
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: reset failed rbd-mirror systemd unit
+ command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa 303
+ changed_when: false
+ failed_when: false
+ when: containerized_deployment | bool
+
+ - name: remove rbd-mirror systemd files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-rbd-mirror@.service
+ - /etc/systemd/system/ceph-rbd-mirror@.service.d
+ - /etc/systemd/system/ceph-rbd-mirror.target
+
+
+- name: redeploy ceph-crash daemons
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: stop and disable ceph-crash systemd service
+ service:
+ name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: remove ceph-crash systemd unit file
+ file:
+ path: /etc/systemd/system/ceph-crash@.service
+ state: absent
+
+ - name: update the placement of ceph-crash hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply crash --placement='label:ceph'"
+ run_once: true
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+
+- name: redeploy alertmanager/grafana/prometheus daemons
+ hosts: "{{ monitoring_group_name|default('monitoring') }}"
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: check whether a ceph config file is present
+ stat:
+ path: "/etc/ceph/{{ cluster }}.conf"
+ register: ceph_config
+
+ - name: ensure /etc/ceph is present
+ file:
+ path: /etc/ceph
+ state: directory
+ owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_directories_mode }}"
+
+ - name: write a ceph.conf with minimal config
+ copy:
+ dest: "/etc/ceph/{{ cluster }}.conf"
+ content: "{{ minimal_config.stdout }}"
+ owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ when: not ceph_config.stat.exists | bool
+
+ - name: with dashboard enabled
+ when: dashboard_enabled | bool
+ block:
+ - name: ensure alertmanager/prometheus data directories are present
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ prometheus_user_id }}"
+ group: "{{ prometheus_user_id }}"
+ with_items:
+ - "{{ alertmanager_data_dir }}"
+ - "{{ prometheus_data_dir }}"
+
+ # (workaround) cephadm adopt alertmanager only stops prometheus-alertmanager systemd service
+ - name: stop and disable alertmanager systemd unit
+ service:
+ name: alertmanager
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ # (workaround) cephadm adopt alertmanager only uses /etc/prometheus/alertmanager.yml
+ - name: create alertmanager config symlink
+ file:
+ path: /etc/prometheus/alertmanager.yml
+ src: '{{ alertmanager_conf_dir }}/alertmanager.yml'
+ state: link
+
+ # (workaround) cephadm adopt alertmanager only uses /var/lib/prometheus/alertmanager/
+ - name: create alertmanager data symlink
+ file:
+ path: '{{ prometheus_data_dir }}/alertmanager'
+ src: '{{ alertmanager_data_dir }}'
+ state: link
+
+ - name: adopt alertmanager daemon
+ cephadm_adopt:
+ name: "alertmanager.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ image: "{{ alertmanager_container_image }}"
+ docker: "{{ true if container_binary == 'docker' else false }}"
+ pull: false
+ firewalld: "{{ true if configure_firewall | bool else false }}"
+
+ - name: remove alertmanager systemd unit file
+ file:
+ path: /etc/systemd/system/alertmanager.service
+ state: absent
+
+ - name: remove the legacy alertmanager data
+ file:
+ path: '{{ alertmanager_data_dir }}'
+ state: absent
+
+ - name: stop and disable prometheus systemd unit
+ service:
+ name: prometheus
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: remove alertmanager data symlink
+ file:
+ path: '{{ prometheus_data_dir }}/alertmanager'
+ state: absent
+
+ # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
+ - name: tmp copy the prometheus data
+ copy:
+ src: '{{ prometheus_data_dir }}/'
+ dest: /var/lib/prom_metrics
+ owner: 65534
+ group: 65534
+ remote_src: true
+
+ # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
+ - name: restore the prometheus data
+ copy:
+ src: /var/lib/prom_metrics/
+ dest: /var/lib/prometheus/metrics
+ owner: 65534
+ group: 65534
+ remote_src: true
+
+ - name: remove the tmp prometheus data copy
+ file:
+ path: /var/lib/prom_metrics
+ state: absent
+
+ - name: adopt prometheus daemon
+ cephadm_adopt:
+ name: "prometheus.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ image: "{{ prometheus_container_image }}"
+ docker: "{{ true if container_binary == 'docker' else false }}"
+ pull: false
+ firewalld: "{{ true if configure_firewall | bool else false }}"
+
+ - name: remove prometheus systemd unit file
+ file:
+ path: /etc/systemd/system/prometheus.service
+ state: absent
+
+ - name: remove the legacy prometheus data
+ file:
+ path: '{{ prometheus_data_dir }}'
+ state: absent
+
+ # (workaround) cephadm adopt grafana only stops grafana systemd service
+ - name: stop and disable grafana systemd unit
+ service:
+ name: grafana-server
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: adopt grafana daemon
+ cephadm_adopt:
+ name: "grafana.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ image: "{{ grafana_container_image }}"
+ docker: "{{ true if container_binary == 'docker' else false }}"
+ pull: false
+ firewalld: "{{ true if configure_firewall | bool else false }}"
+
+ - name: remove grafana systemd unit file
+ file:
+ path: /etc/systemd/system/grafana-server.service
+ state: absent
+
+ - name: remove the legacy grafana data
+ file:
+ path: /var/lib/grafana
+ state: absent
+
+- name: redeploy node-exporter daemons
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ - "{{ monitoring_group_name|default('monitoring') }}"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: with dashboard enabled
+ when: dashboard_enabled | bool
+ block:
+ - name: stop and disable node-exporter systemd service
+ service:
+ name: node_exporter
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: remove node_exporter systemd unit file
+ file:
+ path: /etc/systemd/system/node_exporter.service
+ state: absent
+
+ - name: update the placement of node-exporter hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply node-exporter --placement='*'"
+ run_once: true
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+
+- name: adjust placement daemons
+ hosts: "{{ mon_group_name|default('mons') }}[0]"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: update the placement of monitor hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mon --placement='{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set_fact mgr_placement
+ set_fact:
+ mgr_placement_count: "{{ groups.get(mgr_group_name, []) | length if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name, []) | length }}"
+
+ - name: set_fact mgr_placement_label
+ set_fact:
+ mgr_placement_label: "{{ mgr_group_name if groups.get(mgr_group_name, []) | length > 0 else mon_group_name }}"
+
+ - name: update the placement of manager hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mgr --placement='{{ mgr_placement_count }} label:{{ mgr_placement_label }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: with dashboard enabled
+ when: dashboard_enabled | bool
+ block:
+ - name: update the placement of alertmanager hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply alertmanager --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of grafana hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply grafana --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of prometheus hosts
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply prometheus --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: show ceph orchestrator status
+ hosts: "{{ mon_group_name|default('mons') }}[0]"
+ become: true
+ gather_facts: false
+ any_errors_fatal: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: show ceph orchestrator services
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ls --refresh"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: show ceph orchestrator daemons
+ command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ps --refresh"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: inform users about cephadm
+ debug:
+ msg: |
+      This Ceph cluster is now managed by cephadm. Any further changes to the
+      cluster should be made with the cephadm CLI; you no longer need to use
+      the ceph-ansible playbooks.
--- /dev/null
+---
+- name: gather facts and prepare system for cephadm
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ - "{{ monitoring_group_name|default('monitoring') }}"
+ become: true
+ gather_facts: false
+ vars:
+ delegate_facts_host: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+    - name: fail if monitor group doesn't exist or is empty
+      fail:
+        msg: "you must add a [mons] group with at least one node."
+      run_once: true
+      when: groups[mon_group_name] is undefined or groups[mon_group_name] | length == 0
+
+    - name: fail if manager group doesn't exist or is empty
+      fail:
+        msg: "you must add a [mgrs] group with at least one node."
+      run_once: true
+      when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0
+
+ - name: validate monitor network configuration
+ fail:
+ msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
+ when:
+ - mon_group_name in group_names
+ - monitor_address == 'x.x.x.x'
+ - monitor_address_block == 'subnet'
+ - monitor_interface == 'interface'
+
+ - name: validate dashboard configuration
+ when: dashboard_enabled | bool
+ run_once: true
+ block:
+        - name: fail if [monitoring] group doesn't exist or is empty
+          fail:
+            msg: "you must add a [monitoring] group with at least one node."
+ when: groups[monitoring_group_name] is undefined or groups[monitoring_group_name] | length == 0
+
+ - name: fail when dashboard_admin_password is not set
+ fail:
+ msg: "you must set dashboard_admin_password."
+ when: dashboard_admin_password is undefined
+
+ - name: validate container registry credentials
+ fail:
+ msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set'
+ when:
+ - ceph_docker_registry_auth | bool
+ - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or
+ (ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0)
+
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ when: not delegate_facts_host | bool
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ with_items: "{{ groups['all'] }}"
+ run_once: true
+ when: delegate_facts_host | bool
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+
+ - name: set_fact is_atomic
+ set_fact:
+ is_atomic: "{{ stat_ostree.stat.exists }}"
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+ tasks_from: registry.yml
+ when: ceph_docker_registry_auth | bool
+
+ - name: configure repository for installing cephadm
+ vars:
+ ceph_origin: repository
+ ceph_repository: community
+ block:
+ - name: validate repository variables
+ import_role:
+ name: ceph-validate
+ tasks_from: check_repository.yml
+
+ - name: configure repository
+ import_role:
+ name: ceph-common
+ tasks_from: "configure_repository.yml"
+
+ - name: install cephadm requirements
+ package:
+ name: ['python3', 'lvm2']
+ register: result
+ until: result is succeeded
+
+ - name: install cephadm
+ package:
+ name: cephadm
+ register: result
+ until: result is succeeded
+
+ - name: set_fact cephadm_cmd
+ set_fact:
+ cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}"
+
+- name: bootstrap the cluster
+ hosts: "{{ mon_group_name|default('mons') }}[0]"
+ become: true
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: set_monitor_address.yml
+
+ - name: create /etc/ceph directory
+ file:
+ path: /etc/ceph
+ state: directory
+
+ - name: bootstrap the new cluster
+ cephadm_bootstrap:
+ mon_ip: "{{ _current_monitor_address }}"
+ image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ docker: "{{ true if container_binary == 'docker' else false }}"
+ pull: false
+ dashboard: "{{ dashboard_enabled }}"
+ dashboard_user: "{{ dashboard_admin_user if dashboard_enabled | bool else omit }}"
+ dashboard_password: "{{ dashboard_admin_password if dashboard_enabled | bool else omit }}"
+ monitoring: false
+ firewalld: "{{ configure_firewall }}"
+ ssh_user: "{{ cephadm_ssh_user | default('root') }}"
+ ssh_config: "{{ cephadm_ssh_config | default(omit) }}"
+
+ - name: set default container image in ceph configuration
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set container image base in ceph configuration
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set dashboard container image in ceph mgr configuration
+ when: dashboard_enabled | bool
+ block:
+ - name: set alertmanager container image in ceph configuration
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set grafana container image in ceph configuration
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set node-exporter container image in ceph configuration
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: set prometheus container image in ceph configuration
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: add the other nodes
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ - "{{ monitoring_group_name|default('monitoring') }}"
+ become: true
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: get the cephadm ssh pub key
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key"
+ changed_when: false
+ run_once: true
+ register: cephadm_pubpkey
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: allow cephadm key for {{ cephadm_ssh_user | default('root') }} account
+ authorized_key:
+ user: "{{ cephadm_ssh_user | default('root') }}"
+ key: '{{ cephadm_pubpkey.stdout }}'
+
+ - name: run cephadm prepare-host
+ command: cephadm prepare-host
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: manage nodes with cephadm - ipv4
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+ when: ip_version == 'ipv4'
+
+ - name: manage nodes with cephadm - ipv6
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+ when: ip_version == 'ipv6'
+
+ - name: add ceph label for core component
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
+ changed_when: false
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ when: inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, [])
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: adjust service placement
+ hosts: "{{ mon_group_name|default('mons') }}[0]"
+ become: true
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: update the placement of monitor hosts
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: wait for all monitors to join the quorum
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json"
+ changed_when: false
+ register: ceph_health_raw
+ until: (ceph_health_raw.stdout | from_json)["quorum_names"] | length == groups.get(mon_group_name, []) | length
+ retries: "{{ health_mon_check_retries }}"
+ delay: "{{ health_mon_check_delay }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of manager hosts
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of crash hosts
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: adjust monitoring service placement
+ hosts: "{{ monitoring_group_name|default('monitoring') }}"
+ become: true
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: with dashboard enabled
+ when: dashboard_enabled | bool
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ run_once: true
+ block:
+ - name: enable the prometheus module
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of alertmanager hosts
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of grafana hosts
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of prometheus hosts
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: update the placement of node-exporter hosts
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+- name: print information
+ hosts: "{{ mon_group_name|default('mons') }}[0]"
+ become: true
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: show ceph orchestrator services
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: show ceph orchestrator daemons
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh"
+ changed_when: false
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: inform users about cephadm
+ debug:
+ msg: |
+      This Ceph cluster is now ready for further configuration, such as
+      adding OSDs or MDS daemons and creating pools or keyrings.
+      You can do this with the cephadm CLI; you no longer need to use the
+      ceph-ansible playbooks.
--- /dev/null
+---
+# This playbook is intended to be used as part of the el7 to el8 OS upgrade.
+# It modifies the systemd unit files so containers are launched with podman
+# instead of docker after the OS reboot once it is upgraded.
+# It is *not* intended to restart services, since we want to avoid multiple
+# service restarts.
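+#
+# A typical invocation might look like the following (the playbook path is an
+# assumption based on this repository's layout; adjust it to your checkout):
+#   ansible-playbook -i <your-inventory> infrastructure-playbooks/docker-to-podman.yml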
+
+- hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - iscsigws
+ - mgrs
+ - monitoring
+
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+
+ vars:
+ delegate_facts_host: True
+
+ pre_tasks:
+ - import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+    # pre-tasks for the following imports
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}"
+ run_once: true
+ when: delegate_facts_host | bool
+
+- hosts:
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
+ - "{{ iscsi_gw_group_name | default('iscsigws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ monitoring_group_name | default('monitoring') }}"
+ gather_facts: false
+ become: true
+ tasks:
+ - name: set_fact docker2podman and container_binary
+ set_fact:
+ docker2podman: True
+ container_binary: podman
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+
+ - name: install podman
+ package:
+ name: podman
+ state: present
+ register: result
+ until: result is succeeded
+ tags: with_pkg
+ when: not is_atomic | bool
+
+    - name: check podman presence # noqa 305
+ shell: command -v podman
+ register: podman_presence
+ changed_when: false
+ failed_when: false
+
+ - name: pulling images from docker daemon
+ when: podman_presence.rc == 0
+ block:
+ - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image from docker daemon"
+ command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ register: pull_image
+ until: pull_image.rc == 0
+ retries: "{{ docker_pull_retry }}"
+ delay: 10
+ when: inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(iscsi_gw_group_name, []) or
+ inventory_hostname in groups.get(nfs_group_name, [])
+
+ - name: "pulling alertmanager/grafana/prometheus images from docker daemon"
+ command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
+ changed_when: false
+ register: pull_image
+ until: pull_image.rc == 0
+ retries: "{{ docker_pull_retry }}"
+ delay: 10
+ loop:
+ - "{{ alertmanager_container_image }}"
+ - "{{ grafana_container_image }}"
+ - "{{ prometheus_container_image }}"
+ when:
+ - dashboard_enabled | bool
+ - inventory_hostname in groups.get(monitoring_group_name, [])
+
+ - name: "pulling {{ node_exporter_container_image }} image from docker daemon"
+ command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}"
+ changed_when: false
+ register: pull_image
+ until: pull_image.rc == 0
+ retries: "{{ docker_pull_retry }}"
+ delay: 10
+ when: dashboard_enabled | bool
+
+ - import_role:
+ name: ceph-mon
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(mon_group_name, [])
+
+ - import_role:
+ name: ceph-iscsi-gw
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+ - import_role:
+ name: ceph-mds
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(mds_group_name, [])
+
+ - import_role:
+ name: ceph-mgr
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(mgr_group_name, [])
+
+ - import_role:
+ name: ceph-nfs
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
+ - import_role:
+ name: ceph-osd
+ tasks_from: container_options_facts.yml
+ when: inventory_hostname in groups.get(osd_group_name, [])
+
+ - import_role:
+ name: ceph-osd
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(osd_group_name, [])
+
+ - import_role:
+ name: ceph-rbd-mirror
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+ - import_role:
+ name: ceph-rgw
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(rgw_group_name, [])
+
+ - import_role:
+ name: ceph-crash
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+ - name: dashboard configuration
+ when: dashboard_enabled | bool
+ block:
+ - import_role:
+ name: ceph-node-exporter
+ tasks_from: systemd.yml
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: grafana.yml
+ when: inventory_hostname in groups.get(monitoring_group_name, [])
+
+ - import_role:
+ name: ceph-grafana
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(monitoring_group_name, [])
+
+ - import_role:
+ name: ceph-prometheus
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(monitoring_group_name, [])
+
+ - name: reload systemd daemon
+ systemd:
+ daemon_reload: yes
\ No newline at end of file
--- /dev/null
+# This playbook migrates an OSD from filestore to bluestore backend.
+#
+# Use it like this:
+# ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml --limit <osd-node-to-migrate>
+# If all osds on the node are using the filestore backend, then *ALL* of them will be shrunk and redeployed using the bluestore backend with ceph-volume.
+#
+# If a mix of filestore and bluestore OSDs is detected on the node, the node will be skipped unless you pass `force_filestore_to_bluestore=True` as an extra var.
+# e.g.: ansible-playbook infrastructure-playbooks/filestore-to-bluestore.yml --limit <osd-node-to-migrate> -e force_filestore_to_bluestore=True
+
+- hosts: "{{ osd_group_name }}"
+ become: true
+ serial: 1
+ vars:
+ delegate_facts_host: true
+ tasks:
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups[mon_group_name] }}"
+ run_once: true
+ when: delegate_facts_host | bool
+
+ - import_role:
+ name: ceph-defaults
+
+ - name: import_role ceph-facts
+ import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: set_fact ceph_cmd
+ set_fact:
+ ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
+
+ - name: get ceph osd tree data
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd tree -f json"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ register: osd_tree
+ changed_when: false
+ run_once: true
+
+ - name: set_fact osd_ids
+ set_fact:
+ osd_ids: "{{ osd_ids | default([]) | union(item) }}"
+ with_items:
+ - "{{ ((osd_tree.stdout | default('{}') | trim | from_json).nodes | selectattr('name', 'match', '^' + inventory_hostname + '$') | map(attribute='children') | list) }}"
+
+ - name: get osd metadata
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd metadata osd.{{ item }} -f json"
+ register: osd_metadata
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+ with_items: "{{ osd_ids }}"
+
+ - name: set_fact _osd_objectstore
+ set_fact:
+ _osd_objectstore: "{{ _osd_objectstore | default([]) | union([(item.stdout | from_json).osd_objectstore]) }}"
+ with_items: "{{ osd_metadata.results }}"
+
+ - name: set_fact skip_this_node
+ set_fact:
+ skip_this_node: "{{ ('filestore' in _osd_objectstore and 'bluestore' in _osd_objectstore and not force_filestore_to_bluestore | default(False)) or ('filestore' not in _osd_objectstore) }}"
+
+ - name: filestore to bluestore migration workflow
+ when: not skip_this_node | bool
+ block:
+ - name: get ceph-volume lvm inventory data
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: inventory
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: ceph_volume_inventory
+
+ - name: set_fact inventory
+ set_fact:
+ inventory: "{{ ceph_volume_inventory.stdout | from_json }}"
+
+ - name: set_fact ceph_disk_osds
+ set_fact:
+ ceph_disk_osds_devices: "{{ ceph_disk_osds_devices | default([]) + [item.path] }}"
+ with_items: "{{ inventory }}"
+ when:
+ - not item.available | bool
+ - "'Used by ceph-disk' in item.rejected_reasons"
+
+ - name: ceph-disk prepared OSDs related tasks
+ when: ceph_disk_osds_devices | default([]) | length > 0
+ block:
+ - name: get partlabel
+ command: blkid "{{ item + 'p' if item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item }}"1 -s PARTLABEL -o value
+ register: partlabel
+ with_items: "{{ ceph_disk_osds_devices | default([]) }}"
+
+ - name: get simple scan data
+ ceph_volume_simple_scan:
+ path: "{{ item.item + 'p1' if item.item is match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$') else item.item + '1' }}"
+ cluster: "{{ cluster }"
+ stdout: true
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: simple_scan
+ with_items: "{{ partlabel.results | default([]) }}"
+ when: item.stdout == 'ceph data'
+ ignore_errors: true
+
+ - name: mark out osds
+ ceph_osd:
+ ids: "{{ (item.0.stdout | from_json).whoami }}"
+ cluster: "{{ cluster }}"
+ state: out
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ when: item.1.stdout == 'ceph data'
+
+ - name: stop and disable old osd services
+ service:
+ name: "ceph-osd@{{ (item.0.stdout | from_json).whoami }}"
+ state: stopped
+ enabled: no
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ when: item.1.stdout == 'ceph data'
+
+ - name: umount osd data
+ mount:
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ (item.0.stdout | from_json).whoami }}"
+ state: unmounted
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ when: item.1.stdout == 'ceph data'
+
+ - name: umount osd lockbox
+ mount:
+ path: "/var/lib/ceph/osd-lockbox/{{ (item.0.stdout | from_json).data.uuid }}"
+ state: unmounted
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ when:
+ - item.1.stdout == 'ceph data'
+ - (item.0.stdout | from_json).encrypted | default(False) | bool
+
+ - name: ensure dmcrypt for data device is closed
+ command: cryptsetup close "{{ (item.0.stdout | from_json).data.uuid }}"
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ failed_when: false
+ changed_when: false
+ when:
+ - item.1.stdout == 'ceph data'
+ - (item.0.stdout | from_json).encrypted | default(False) | bool
+
+ - name: ensure dmcrypt for journal device is closed
+ command: cryptsetup close "{{ (item.0.stdout | from_json).journal.uuid }}"
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ failed_when: false
+ changed_when: false
+ when:
+ - item.1.stdout == 'ceph data'
+ - (item.0.stdout | from_json).encrypted | default(False) | bool
+
+ - name: zap data devices
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: zap
+ destroy: true
+ data: "{{ (item.0.stdout | from_json).data.path }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ when: item.1.stdout == 'ceph data'
+
+ - name: zap journal devices
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: zap
+ destroy: true
+ journal: "{{ (item.0.stdout | from_json).journal.path }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_together:
+ - "{{ simple_scan.results }}"
+ - "{{ partlabel.results }}"
+ when:
+ - item.1.stdout == 'ceph data'
+ - (item.0.stdout | from_json).journal.path is defined
+
+ - name: get ceph-volume lvm list data
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: list
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: ceph_volume_lvm_list
+
+ - name: set_fact _lvm_list
+ set_fact:
+ _lvm_list: "{{ _lvm_list | default([]) + item.value }}"
+ with_dict: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json) }}"
+
+ - name: ceph-volume prepared OSDs related tasks
+ block:
+ - name: mark out osds
+ ceph_osd:
+ ids: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
+ cluster: "{{ cluster }}"
+ state: out
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ - name: stop and disable old osd services
+ service:
+ name: "ceph-osd@{{ item }}"
+ state: stopped
+ enabled: no
+ with_items: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
+
+ - name: stop and disable ceph-volume services
+ service:
+ name: "ceph-volume@lvm-{{ item.tags['ceph.osd_id'] }}-{{ item.tags['ceph.osd_fsid'] }}"
+ state: stopped
+ enabled: no
+ with_items: "{{ _lvm_list }}"
+ when:
+ - not containerized_deployment | bool
+ - item.type == 'data'
+
+ - name: mark down osds
+ ceph_osd:
+ ids: "{{ (ceph_volume_lvm_list.stdout | default('{}') | from_json).keys() | list }}"
+ cluster: "{{ cluster }}"
+ state: down
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ - name: ensure all dmcrypt for data and journal are closed
+ command: cryptsetup close "{{ item['lv_uuid'] }}"
+ with_items: "{{ _lvm_list }}"
+ changed_when: false
+ failed_when: false
+ when: item['tags'].get('ceph.encrypted', 0) | int == 1
+
+ - name: set_fact osd_fsid_list
+ set_fact:
+ osd_fsid_list: "{{ osd_fsid_list | default([]) + [{'osd_fsid': item.tags['ceph.osd_fsid'], 'destroy': (item.lv_name.startswith('osd-data-') and item.vg_name.startswith('ceph-')) | ternary(true, false), 'device': item.devices[0], 'journal': item['tags']['ceph.journal_device'] }] }}"
+ with_items: "{{ _lvm_list }}"
+ when: item.type == 'data'
+
+ - name: zap ceph-volume prepared OSDs
+ ceph_volume:
+ action: "zap"
+ osd_fsid: "{{ item.osd_fsid }}"
+ destroy: false
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ loop: "{{ osd_fsid_list }}"
+ when: osd_fsid_list is defined
+
+ - name: zap destroy ceph-volume prepared devices
+ ceph_volume:
+ action: "zap"
+ data: "{{ item.device }}"
+ destroy: true
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ loop: "{{ osd_fsid_list }}"
+ when:
+ - osd_fsid_list is defined
+ - item.destroy | bool
+
+          - name: check whether the journal device has already been destroyed because of collocation
+ stat:
+ path: "{{ item.journal }}"
+ loop: "{{ osd_fsid_list }}"
+ register: journal_path
+ when:
+ - osd_fsid_list is defined
+ - item.destroy | bool
+ - item.journal is defined
+ - item.journal not in (lvm_volumes | selectattr('journal', 'defined') | map(attribute='journal') | list)
+
+ - name: zap destroy ceph-volume prepared journal devices
+ ceph_volume:
+ action: "zap"
+ data: "{{ item.0.journal }}"
+ destroy: true
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+              CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ loop: "{{ osd_fsid_list | zip(journal_path.results) | list }}"
+ when:
+ - osd_fsid_list is defined
+ - item.0.destroy | bool
+ - item.0.journal is defined
+ - item.0.journal not in (lvm_volumes | selectattr('journal', 'defined') | map(attribute='journal') | list)
+ - item.1.stat.exists | bool
+
+ - name: ensure all dm are closed
+ command: dmsetup remove "{{ item['lv_path'] }}"
+ with_items: "{{ _lvm_list }}"
+ changed_when: false
+ failed_when: false
+ when:
+ - item['lv_path'] is defined
+ # Do not close mappers for non 'lvm batch' devices
+ - devices | default([]) | length > 0
+
+ - name: ensure all pv are removed
+ command: "pvremove --yes {{ item.devices[0] }}"
+ with_items: "{{ _lvm_list }}"
+ failed_when: false
+ when:
+ - item.type == 'data'
+ - item.lv_name.startswith('osd-data-') | bool
+ - item.vg_name.startswith('ceph-') | bool
+ when: _lvm_list is defined
+
+ - name: set_fact osd_ids
+ set_fact:
+ osd_ids: "{{ osd_ids | default([]) + [item] }}"
+ with_items:
+ - "{{ ((osd_tree.stdout | default('{}') | from_json).nodes | selectattr('name', 'match', '^' + inventory_hostname + '$') | map(attribute='children') | list) }}"
+
+ - name: purge osd(s) from the cluster
+ ceph_osd:
+ ids: "{{ item }}"
+ cluster: "{{ cluster }}"
+ state: purge
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items: "{{ osd_ids }}"
+
+ - name: purge /var/lib/ceph/osd directories
+ file:
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
+ state: absent
+ with_items: "{{ osd_ids }}"
+
+ - name: force osd_objectstore to bluestore
+ set_fact:
+ osd_objectstore: bluestore
+
+ - name: refresh ansible devices fact
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ filter: ansible_devices
+ when: osd_auto_discovery | bool
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
+ - name: remove gpt header
+ command: parted -s "{{ item }}" mklabel msdos
+ with_items: "{{ (devices + dedicated_devices | default([]) + ceph_disk_osds_devices | default([])) | unique }}"
+
+ - name: update lvm_volumes configuration for bluestore
+ when:
+ - lvm_volumes | length > 0
+ - not osd_auto_discovery | bool
+ block:
+ - name: reuse filestore journal partition for bluestore db
+ set_fact:
+ config_part: "{{ config_part | default([]) + [item | combine({'db': item.journal})] }}"
+ with_items: "{{ lvm_volumes | selectattr('journal_vg', 'undefined') | list }}"
+
+ - name: reuse filestore journal vg/lv for bluestore db
+ set_fact:
+ config_vglv: "{{ config_vglv | default([]) + [item | combine({'db': item.journal, 'db_vg': item.journal_vg})] }}"
+ with_items: "{{ lvm_volumes | selectattr('journal_vg', 'defined') | list }}"
+
+ - name: override lvm_volumes with bluestore configuration
+ set_fact:
+ lvm_volumes: "{{ config_part | default([]) + config_vglv | default([]) }}"
+
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-osd
+
+- name: final play
+ hosts: "{{ osd_group_name }}"
+ become: true
+ gather_facts: false
+ tasks:
+
+ - import_role:
+ name: ceph-defaults
+ - name: report any skipped node during this playbook
+ debug:
+ msg: |
+ "WARNING:"
+ "This node has been skipped because OSDs are either"
+ "all bluestore or there's a mix of filestore and bluestore OSDs"
+ when:
+ - skip_this_node | bool
\ No newline at end of file
--- /dev/null
+- hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - mgrs
+ - iscsigws
+
+ gather_facts: false
+ become: yes
+
+ tasks:
+ - name: create a temp directory
+ tempfile:
+ state: directory
+ prefix: ceph_ansible
+ run_once: true
+ register: localtempfile
+ become: false
+ delegate_to: localhost
+
+    - name: look up ceph config, keys and logs
+ find:
+ paths:
+ - /etc/ceph
+ - /var/log/ceph
+ register: ceph_collect
+
+ - name: collect ceph logs, config and keys in "{{ localtempfile.path }}" on the machine running ansible
+ fetch:
+ src: "{{ item.path }}"
+ dest: "{{ localtempfile.path }}"
+ fail_on_missing: no
+ flat: no
+ with_items: "{{ ceph_collect.files }}"
--- /dev/null
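+# A typical invocation of this playbook might be (the playbook path is an
+# assumption based on this repository's layout; adjust it to your checkout):
+#   ansible-playbook -i <your-inventory> infrastructure-playbooks/lv-create.yml
+# It expects its settings (devices, prefixes, sizes) to come from lv_vars.yaml,
+# see the include_vars task below.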
+- name: create logical volumes for the bucket index or fs journals on a single device
+ become: true
+ hosts: osds
+
+ vars:
+ logfile: |
+ Suggested cut and paste under "lvm_volumes:" in "group_vars/osds.yml"
+ -----------------------------------------------------------------------------------------------------------
+ {% for lv in nvme_device_lvs %}
+ - data: {{ lv.lv_name }}
+ data_vg: {{ nvme_vg_name }}
+ journal: {{ lv.journal_name }}
+ journal_vg: {{ nvme_vg_name }}
+ {% endfor %}
+ {% for hdd in hdd_devices %}
+ - data: {{ hdd_lv_prefix }}-{{ hdd.split('/')[-1] }}
+ data_vg: {{ hdd_vg_prefix }}-{{ hdd.split('/')[-1] }}
+ journal: {{ hdd_journal_prefix }}-{{ hdd.split('/')[-1] }}
+ journal_vg: {{ nvme_vg_name }}
+ {% endfor %}
+
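+  # For illustration only: for an hdd device /dev/sdb the template above would
+  # suggest an entry of the form
+  #   - data: <hdd_lv_prefix>-sdb
+  #     data_vg: <hdd_vg_prefix>-sdb
+  #     journal: <hdd_journal_prefix>-sdb
+  #     journal_vg: <nvme_vg_name>
+  # with the prefixes and vg name taken from lv_vars.yaml.
+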
+ tasks:
+
+ - name: include vars of lv_vars.yaml
+ include_vars:
+ file: lv_vars.yaml # noqa 505
+ failed_when: false
+
+ # ensure nvme_device is set
+ - name: fail if nvme_device is not defined
+ fail:
+ msg: "nvme_device has not been set by the user"
+ when: nvme_device is undefined or nvme_device == 'dummy'
+
+ # need to check if lvm2 is installed
+ - name: install lvm2
+ package:
+ name: lvm2
+ state: present
+ register: result
+ until: result is succeeded
+
+ # Make entire nvme device a VG
+ - name: add nvme device as lvm pv
+ lvg:
+ force: yes
+ pvs: "{{ nvme_device }}"
+ pesize: 4
+ state: present
+ vg: "{{ nvme_vg_name }}"
+
+ - name: create lvs for fs journals for the bucket index on the nvme device
+ lvol:
+ lv: "{{ item.journal_name }}"
+ vg: "{{ nvme_vg_name }}"
+ size: "{{ journal_size }}"
+ pvs: "{{ nvme_device }}"
+ with_items: "{{ nvme_device_lvs }}"
+
+ - name: create lvs for fs journals for hdd devices
+ lvol:
+ lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ nvme_vg_name }}"
+ size: "{{ journal_size }}"
+ with_items: "{{ hdd_devices }}"
+
+ - name: create the lv for data portion of the bucket index on the nvme device
+ lvol:
+ lv: "{{ item.lv_name }}"
+ vg: "{{ nvme_vg_name }}"
+ size: "{{ item.size }}"
+ pvs: "{{ nvme_device }}"
+ with_items: "{{ nvme_device_lvs }}"
+
+ # Make sure all hdd devices have a unique volume group
+ - name: create vgs for all hdd devices
+ lvg:
+ force: yes
+ pvs: "{{ item }}"
+ pesize: 4
+ state: present
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ with_items: "{{ hdd_devices }}"
+
+ - name: create lvs for the data portion on hdd devices
+ lvol:
+ lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ size: "{{ hdd_lv_size }}"
+ pvs: "{{ item }}"
+ with_items: "{{ hdd_devices }}"
+
+ - name: "write output for osds.yml to {{ logfile_path }}"
+ become: false
+ copy:
+ content: "{{ logfile }}"
+ dest: "{{ logfile_path }}"
+ delegate_to: localhost
--- /dev/null
+- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
+ become: true
+ hosts: osds
+
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to tear down the logical volumes?
+ default: 'no'
+ private: no
+
+ tasks:
+ - name: exit playbook, if user did not mean to tear down logical volumes
+ fail:
+ msg: >
+ "Exiting lv-teardown playbook, logical volumes were NOT torn down.
+ To tear down the logical volumes, either say 'yes' on the prompt or
+         use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: include vars of lv_vars.yaml
+ include_vars:
+ file: lv_vars.yaml # noqa 505
+ failed_when: false
+
+ # need to check if lvm2 is installed
+ - name: install lvm2
+ package:
+ name: lvm2
+ state: present
+ register: result
+ until: result is succeeded
+
+# BEGIN TEARDOWN
+ - name: find any existing osd filesystems
+ shell: |
+ set -o pipefail;
+ grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
+ register: old_osd_filesystems
+ changed_when: false
+
+ - name: tear down any existing osd filesystem
+ mount:
+ path: "{{ item }}"
+ state: unmounted
+ with_items: "{{ old_osd_filesystems.stdout_lines }}"
+
+  - name: kill all lvm commands that may have hung
+    shell: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
+ failed_when: false
+ changed_when: false
+
+  ## Logical Vols
+ - name: tear down existing lv for bucket index
+ lvol:
+ lv: "{{ item.lv_name }}"
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: yes
+ with_items: "{{ nvme_device_lvs }}"
+
+ - name: tear down any existing hdd data lvs
+ lvol:
+ lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ state: absent
+ force: yes
+ with_items: "{{ hdd_devices }}"
+
+ - name: tear down any existing lv of journal for bucket index
+ lvol:
+ lv: "{{ item.journal_name }}"
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: yes
+ with_items: "{{ nvme_device_lvs }}"
+
+ - name: tear down any existing lvs of hdd journals
+ lvol:
+ lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: yes
+ with_items: "{{ hdd_devices }}"
+
+ ## Volume Groups
+ - name: remove vg on nvme device
+ lvg:
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: yes
+
+ - name: remove vg for each hdd device
+ lvg:
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ state: absent
+ force: yes
+ with_items: "{{ hdd_devices }}"
+
+ ## Physical Vols
+ - name: tear down pv for nvme device
+ command: "pvremove --force --yes {{ nvme_device }}"
+ changed_when: false
+
+ - name: tear down pv for each hdd device
+ command: "pvremove --force --yes {{ item }}"
+ changed_when: false
+ with_items: "{{ hdd_devices }}"
--- /dev/null
+---
+# This playbook purges Ceph
+# It removes: packages, configuration files and ALL THE DATA
+#
+# Use it like this:
+# ansible-playbook purge-cluster.yml
+# Prompts for confirmation to purge, defaults to no and
+# doesn't purge the cluster. Answering 'yes' purges the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no purge-cluster.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+- name: confirm whether user really meant to purge the cluster
+ hosts: localhost
+ gather_facts: false
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to purge the cluster?
+ default: 'no'
+ private: no
+ tasks:
+ - name: exit playbook, if user did not mean to purge cluster
+ fail:
+ msg: >
+ "Exiting purge-cluster playbook, cluster was NOT purged.
+ To purge the cluster, either say 'yes' on the prompt or
+         use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+
+- name: gather facts on all hosts
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - nfss
+ - clients
+ - mgrs
+ - monitoring
+ become: true
+ tasks:
+ - debug:
+        msg: "gather facts on all Ceph hosts for later reference"
+
+
+- name: check there's no ceph kernel threads present
+ hosts: clients
+ become: true
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - block:
+ - name: get nfs nodes ansible facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups[nfs_group_name] }}"
+ run_once: true
+
+ - name: get all nfs-ganesha mount points
+ command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+ register: nfs_ganesha_mount_points
+ failed_when: false
+ with_items: "{{ groups[nfs_group_name] }}"
+
+ - name: ensure nfs-ganesha mountpoint(s) are unmounted
+ mount:
+ path: "{{ item.split(' ')[1] }}"
+ state: unmounted
+ with_items:
+ - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
+ when: item | length > 0
+ when: groups[nfs_group_name] | default([]) | length > 0
+
+ - name: ensure cephfs mountpoint(s) are unmounted
+ command: umount -a -t ceph
+ changed_when: false
+
+ - name: find mapped rbd ids
+ find:
+ paths: /sys/bus/rbd/devices
+ file_type: any
+ register: rbd_mapped_ids
+
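+      # Unmap every rbd device by writing its id to the sysfs interface; this
+      # works even if the rbd CLI has already been removed from the client.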
+ - name: use sysfs to unmap rbd devices
+ shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
+ changed_when: false
+ with_items: "{{ rbd_mapped_ids.files }}"
+
+ - name: unload ceph kernel modules
+ modprobe:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - rbd
+ - ceph
+ - libceph
+
+
+- name: purge ceph nfs cluster
+ hosts: nfss
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: stop ceph nfss with systemd
+ service:
+ name: "{{ 'ceph-nfs@' + ansible_facts['hostname'] if containerized_deployment | bool else 'nfs-ganesha' }}"
+ state: stopped
+ failed_when: false
+
+ - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/ganesha
+ - /var/lib/nfs/ganesha
+ - /var/run/ganesha
+ - /etc/systemd/system/ceph-nfs@.service
+
+
+- name: purge node-exporter
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - nfss
+ - clients
+ - mgrs
+ - monitoring
+ - iscsigws
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - block:
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: disable node_exporter service
+ service:
+ name: node_exporter
+ state: stopped
+ enabled: no
+ failed_when: false
+
+ - name: remove node_exporter service file
+ file:
+ name: /etc/systemd/system/node_exporter.service
+ state: absent
+
+ - name: remove node-exporter image
+ command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
+ failed_when: false
+ tags:
+ - remove_img
+ when: dashboard_enabled | bool
+
+
+- name: purge ceph monitoring
+ hosts: monitoring
+ become: true
+ vars:
+ grafana_services:
+ - grafana-server
+ - prometheus
+ - alertmanager
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - block:
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: stop services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ with_items: "{{ grafana_services }}"
+ failed_when: false
+
+ - name: remove service files
+ file:
+ name: "/etc/systemd/system/{{ item }}.service"
+ state: absent
+ with_items: "{{ grafana_services }}"
+ failed_when: false
+
+ - name: remove ceph dashboard container images
+ command: "{{ container_binary }} rmi {{ item }}"
+ with_items:
+ - "{{ prometheus_container_image }}"
+ - "{{ grafana_container_image }}"
+ - "{{ alertmanager_container_image }}"
+ failed_when: false
+ tags:
+ - remove_img
+
+ - name: remove data
+ file:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/grafana/dashboards
+ - /etc/grafana/grafana.ini
+ - /etc/grafana/provisioning
+ - /var/lib/grafana
+ - /etc/alertmanager
+ - /var/lib/alertmanager
+ - /var/lib/prometheus
+ - /etc/prometheus
+ failed_when: false
+ when: dashboard_enabled | bool
+
+
+- name: purge ceph mds cluster
+ hosts: mdss
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - name: stop ceph mdss with systemd
+ service:
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
+ state: stopped
+ enabled: no
+ failed_when: false
+
+ - name: remove ceph mds service
+ file:
+ path: /etc/systemd/system/ceph-mds{{ item }}
+ state: absent
+ loop:
+ - '@.service'
+ - '.target'
+
+
+- name: purge ceph mgr cluster
+ hosts: mgrs
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - name: stop ceph mgrs with systemd
+ service:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ state: stopped
+ enabled: no
+ failed_when: false
+ when: ansible_facts['service_mgr'] == 'systemd'
+
+ - name: remove ceph mgr service
+ file:
+ path: /etc/systemd/system/ceph-mgr{{ item }}
+ state: absent
+ loop:
+ - '@.service'
+ - '.target'
+
+- name: purge rgwloadbalancer cluster
+ hosts: rgwloadbalancers
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - name: stop rgwloadbalancer services
+ service:
+ name: ['keepalived', 'haproxy']
+ state: stopped
+ enabled: no
+ failed_when: false
+
+
+- name: purge ceph rgw cluster
+ hosts: rgws
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: set_radosgw_address
+
+ - name: stop ceph rgws with systemd
+ service:
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+ with_items: "{{ rgw_instances }}"
+
+ - name: remove ceph rgw service
+ file:
+ path: /etc/systemd/system/ceph-radosgw{{ item }}
+ state: absent
+ loop:
+ - '@.service'
+ - '.target'
+
+
+- name: purge ceph rbd-mirror cluster
+ hosts: rbdmirrors
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - name: stop ceph rbd mirror with systemd
+ service:
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+
+ - name: remove ceph rbd-mirror service
+ file:
+ path: /etc/systemd/system/ceph-rbd-mirror{{ item }}
+ state: absent
+ loop:
+ - '@.service'
+ - '.target'
+
+
+- name: purge ceph osd cluster
+ vars:
+ reboot_osd_node: False
+ hosts: osds
+ gather_facts: false # Already gathered previously
+ become: true
+ handlers:
+ - name: restart machine
+ shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+ async: 1
+ poll: 0
+ ignore_errors: true
+
+ - name: wait for server to boot
+ become: false
+ wait_for:
+ port: 22
+ host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
+ state: started
+ delay: 10
+ timeout: 500
+ delegate_to: localhost
+
+ - name: remove data
+ shell: rm -rf /var/lib/ceph/* # noqa 302
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: default lvm_volumes if not defined
+ set_fact:
+ lvm_volumes: []
+ when: lvm_volumes is not defined
+
+ - name: get osd numbers
+ shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306
+ register: osd_ids
+ changed_when: false
+
+ - name: stop ceph-osd
+ service:
+ name: ceph-osd@{{ item }}
+ state: stopped
+ enabled: no
+ with_items: "{{ osd_ids.stdout_lines }}"
+
+ - name: remove ceph udev rules
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/udev/rules.d/95-ceph-osd.rules
+ - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
+ when: not containerized_deployment | bool
+
+ # NOTE(leseb): hope someone will find a more elegant way one day...
+ - name: see if encrypted partitions are present
+ shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 # noqa 306
+ register: encrypted_ceph_partuuid
+ changed_when: false
+
+ - name: get osd data and lockbox mount points
+ shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa 306
+ register: mounted_osd
+ changed_when: false
+
+ - name: drop all cache
+ shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
+ changed_when: false
+
+ - name: see if ceph-volume is installed # noqa : 305
+ shell: command -v ceph-volume
+ changed_when: false
+ failed_when: false
+ register: ceph_volume_present
+ when: not containerized_deployment | bool
+
+ - name: zap and destroy osds by osd ids
+ ceph_volume:
+ osd_id: "{{ item | int }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ osd_ids.stdout_lines }}"
+ when:
+ - osd_auto_discovery | default(False) | bool
+ - (containerized_deployment | bool or ceph_volume_present.rc == 0)
+
+ - name: umount osd data partition
+ mount:
+ path: "{{ item }}"
+ state: unmounted
+ with_items: "{{ mounted_osd.stdout_lines }}"
+
+ - name: remove osd mountpoint tree
+ file:
+ path: /var/lib/ceph/osd/
+ state: absent
+ register: remove_osd_mountpoints
+ ignore_errors: true
+
+ - name: is reboot needed
+ command: echo requesting reboot
+ delegate_to: localhost
+ become: false
+ notify:
+ - restart machine
+ - wait for server to boot
+ - remove data
+ when:
+ - reboot_osd_node | bool
+ - remove_osd_mountpoints.failed is defined
+
+ - name: wipe table on dm-crypt devices
+ command: dmsetup wipe_table --force "{{ item }}"
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+
+ - name: delete dm-crypt devices if any
+ command: dmsetup remove --retry --force {{ item }}
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+
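+    # The next three tasks collect what is needed to wipe each LUKS header:
+    # 'Payload offset' from cryptsetup luksDump is the sector at which the
+    # encrypted payload starts, so everything before it is header and key
+    # material. Zeroing that many blocks of the device's sector size from the
+    # start of the partition (wipe task below) renders the data unrecoverable.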
+ - name: get payload_offset
+ shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa 306
+ register: payload_offset
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+
+ - name: get physical sector size
+ command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
+ changed_when: false
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+ register: phys_sector_size
+
+ - name: wipe dmcrypt device
+ command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
+ changed_when: false
+ with_together:
+ - "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ - "{{ payload_offset.results }}"
+ - "{{ phys_sector_size.results }}"
+
+ - name: get ceph data partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph data"
+ changed_when: false
+ failed_when: false
+ register: ceph_data_partition_to_erase_path
+
+ - name: get ceph lockbox partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph lockbox"
+ changed_when: false
+ failed_when: false
+ register: ceph_lockbox_partition_to_erase_path
+
+ - name: see if ceph-volume is installed # noqa : 305
+ shell: command -v ceph-volume
+ changed_when: false
+ failed_when: false
+ register: ceph_volume_present
+ when: not containerized_deployment | bool
+
+ - name: zap and destroy osds created by ceph-volume with lvm_volumes
+ ceph_volume:
+ data: "{{ item.data }}"
+ data_vg: "{{ item.data_vg|default(omit) }}"
+ journal: "{{ item.journal|default(omit) }}"
+ journal_vg: "{{ item.journal_vg|default(omit) }}"
+ db: "{{ item.db|default(omit) }}"
+ db_vg: "{{ item.db_vg|default(omit) }}"
+ wal: "{{ item.wal|default(omit) }}"
+ wal_vg: "{{ item.wal_vg|default(omit) }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ lvm_volumes | default([]) }}"
+ when:
+ - containerized_deployment | bool
+ or ceph_volume_present.rc == 0
+
+ - name: zap and destroy osds created by ceph-volume with devices
+ ceph_volume:
+ data: "{{ item }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items:
+ - "{{ devices | default([]) }}"
+ - "{{ dedicated_devices | default([]) }}"
+ - "{{ bluestore_wal_devices | default([]) }}"
+ when:
+ - containerized_deployment | bool
+ or ceph_volume_present.rc == 0
+
+ - name: get ceph block partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph block"
+ changed_when: false
+ failed_when: false
+ register: ceph_block_partition_to_erase_path
+
+ - name: get ceph journal partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph journal"
+ changed_when: false
+ failed_when: false
+ register: ceph_journal_partition_to_erase_path
+
+ - name: get ceph db partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph block.db"
+ changed_when: false
+ failed_when: false
+ register: ceph_db_partition_to_erase_path
+
+ - name: get ceph wal partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph block.wal"
+ changed_when: false
+ failed_when: false
+ register: ceph_wal_partition_to_erase_path
+
+ - name: set_fact combined_devices_list
+ set_fact:
+ combined_devices_list: "{{ ceph_data_partition_to_erase_path.stdout_lines +
+ ceph_lockbox_partition_to_erase_path.stdout_lines +
+ ceph_block_partition_to_erase_path.stdout_lines +
+ ceph_journal_partition_to_erase_path.stdout_lines +
+ ceph_db_partition_to_erase_path.stdout_lines +
+ ceph_wal_partition_to_erase_path.stdout_lines }}"
+
+ - name: resolve parent device
+ command: lsblk --nodeps -no pkname "{{ item }}"
+ register: tmp_resolved_parent_device
+ changed_when: false
+ with_items: "{{ combined_devices_list }}"
+
+ - name: set_fact resolved_parent_device
+ set_fact:
+ resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
+
+ - name: wipe partitions
+ shell: |
+ wipefs --all "{{ item }}"
+ dd if=/dev/zero of="{{ item }}" bs=1 count=4096
+ changed_when: false
+ with_items: "{{ combined_devices_list }}"
+
+ - name: check parent device partition
+ parted:
+ device: "/dev/{{ item }}"
+ loop: "{{ resolved_parent_device }}"
+ register: parted_info
+
+ - name: fail if there is a boot partition on the device
+ fail:
+ msg: "{{ item.item }} has a boot partition"
+ loop: "{{ parted_info.results }}"
+ when: "'boot' in (item.partitions | map(attribute='flags') | list | flatten)"
+
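+    # For each parent device that hosted ceph journal/db/wal partitions:
+    # sgdisk -Z clears the GPT and MBR structures, dd zeroes the first 200MB
+    # to remove any leftover metadata, a fresh GPT label is written, and
+    # partprobe/udevadm settle make the kernel re-read the now empty table.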
+ - name: zap ceph journal/block db/block wal partitions # noqa 306
+ shell: |
+ sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}"
+ dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200
+ parted -s /dev/"{{ item }}" mklabel gpt
+ partprobe /dev/"{{ item }}"
+ udevadm settle --timeout=600
+ with_items: "{{ resolved_parent_device }}"
+ changed_when: false
+
+ - name: remove ceph osd service
+ file:
+ path: /etc/systemd/system/ceph-osd{{ item }}
+ state: absent
+ loop:
+ - '@.service'
+ - '.target'
+
+- name: purge ceph mon cluster
+ hosts: mons
+ gather_facts: false # already gathered previously
+ become: true
+ tasks:
+ - name: stop ceph mons with systemd
+ service:
+ name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+ with_items:
+ - mon
+ - mgr
+
+ - name: remove monitor store and bootstrap keys
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /var/lib/ceph/mon
+ - /var/lib/ceph/bootstrap-mds
+ - /var/lib/ceph/bootstrap-osd
+ - /var/lib/ceph/bootstrap-rgw
+ - /var/lib/ceph/bootstrap-rbd
+ - /var/lib/ceph/bootstrap-mgr
+ - /var/lib/ceph/tmp
+
+ - name: remove ceph mon and mgr service
+ file:
+ path: "/etc/systemd/system/ceph-{{ item.0 }}{{ item.1 }}"
+ state: absent
+ loop: "{{ ['mon', 'mgr'] | product(['@.service', '.target']) | list }}"
+
+
+- name: purge ceph-crash daemons
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - mgrs
+ gather_facts: false
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: stop ceph-crash service
+ service:
+ name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+
+ - name: systemctl reset-failed ceph-crash # noqa 303
+ command: "systemctl reset-failed {{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
+ changed_when: false
+ failed_when: false
+
+ - name: remove service file
+ file:
+ name: "/etc/systemd/system/ceph-crash{{ '@' if containerized_deployment | bool else '' }}.service"
+ state: absent
+ failed_when: false
+
+ - name: remove /var/lib/ceph/crash
+ file:
+ path: /var/lib/ceph/crash
+ state: absent
+
+
+- name: check container hosts
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - nfss
+ - mgrs
+ become: true
+ tasks:
+ - name: containerized_deployment only
+ when: containerized_deployment | bool
+ block:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: remove stopped/exited containers
+ command: >
+ {{ container_binary }} container prune -f
+ changed_when: false
+
+ - name: show container list on all the nodes (should be empty)
+ command: >
+ {{ container_binary }} ps --filter='name=ceph' -a -q
+ register: containers_list
+ changed_when: false
+
+        - name: show container images on all the nodes (should be empty if the remove_img tag was passed)
+ command: >
+ {{ container_binary }} images
+ register: images_list
+ changed_when: false
+
+        - name: fail if containers are still present
+          fail:
+            msg: "It looks like containers are still present."
+ when: containers_list.stdout_lines|length > 0
+
+
+- name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data
+ vars:
+    # When set to true, both groups of packages are purged.
+    # This can cause problems with qemu-kvm
+ purge_all_packages: true
+ ceph_packages:
+ - ceph
+ - ceph-base
+ - ceph-common
+ - ceph-fuse
+ - ceph-mds
+ - ceph-mgr
+ - ceph-mgr-modules-core
+ - ceph-mon
+ - ceph-osd
+ - ceph-release
+ - ceph-radosgw
+ - ceph-grafana-dashboards
+ - rbd-mirror
+ ceph_remaining_packages:
+ - libcephfs2
+ - librados2
+ - libradosstriper1
+ - librbd1
+ - librgw2
+ - python3-ceph-argparse
+ - python3-ceph-common
+ - python3-cephfs
+ - python3-rados
+ - python3-rbd
+ - python3-rgw
+ extra_packages:
+ - keepalived
+ - haproxy
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - nfss
+ - clients
+ - mgrs
+ - monitoring
+ gather_facts: false # Already gathered previously
+ become: true
+ handlers:
+ - name: get osd data and lockbox mount points
+ shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
+ register: mounted_osd
+ changed_when: false
+ listen: "remove data"
+
+ - name: umount osd data partition
+ mount:
+ path: "{{ item }}"
+ state: unmounted
+ with_items: "{{ mounted_osd.stdout_lines }}"
+ listen: "remove data"
+
+ - name: remove data
+ shell: rm -rf /var/lib/ceph/* # noqa 302
+ listen: "remove data"
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: non containerized related tasks
+ when: not containerized_deployment | bool
+ block:
+ - name: purge ceph packages with yum
+ yum:
+ name: "{{ ceph_packages }}"
+ state: absent
+ when: ansible_facts['pkg_mgr'] == 'yum'
+
+ - name: purge ceph packages with dnf
+ dnf:
+ name: "{{ ceph_packages }}"
+ state: absent
+ when: ansible_facts['pkg_mgr'] == 'dnf'
+
+ - name: purge ceph packages with apt
+ apt:
+ name: "{{ ceph_packages }}"
+ state: absent
+ purge: true
+ when: ansible_facts['pkg_mgr'] == 'apt'
+
+ - name: purge remaining ceph packages with yum
+ yum:
+ name: "{{ ceph_remaining_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'yum'
+ - purge_all_packages | bool
+
+ - name: purge remaining ceph packages with dnf
+ dnf:
+ name: "{{ ceph_remaining_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'dnf'
+ - purge_all_packages | bool
+
+ - name: purge remaining ceph packages with apt
+ apt:
+ name: "{{ ceph_remaining_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'apt'
+ - purge_all_packages | bool
+
+ - name: purge extra packages with yum
+ yum:
+ name: "{{ extra_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'yum'
+ - purge_all_packages | bool
+
+ - name: purge extra packages with dnf
+ dnf:
+ name: "{{ extra_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'dnf'
+ - purge_all_packages | bool
+
+ - name: purge extra packages with apt
+ apt:
+ name: "{{ extra_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'apt'
+ - purge_all_packages | bool
+
+ - name: remove config and any ceph socket left
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/ceph
+ - /etc/keepalived
+ - /etc/haproxy
+ - /run/ceph
+
+ - name: remove logs
+ file:
+ path: /var/log/ceph
+ state: absent
+
+ - name: request data removal
+ command: echo requesting data removal # noqa 301
+ become: false
+ delegate_to: localhost
+ notify: remove data
+
+ - name: purge dnf cache
+ command: dnf clean all
+ when: ansible_facts['pkg_mgr'] == 'dnf'
+
+ - name: clean apt
+ command: apt-get clean # noqa 303
+ when: ansible_facts['pkg_mgr'] == 'apt'
+
+ - name: purge ceph repo file in /etc/yum.repos.d
+ file:
+ path: '/etc/yum.repos.d/{{ item }}.repo'
+ state: absent
+ with_items:
+ - ceph-dev
+ - ceph_stable
+ when: ansible_facts['os_family'] == 'RedHat'
+
+ - name: check for anything running ceph
+ command: "ps -u ceph -U ceph"
+ register: check_for_running_ceph
+ changed_when: false
+ failed_when: check_for_running_ceph.rc == 0
+
+ - name: find ceph systemd unit files to remove
+ find:
+ paths: "/etc/systemd/system"
+ pattern: "ceph*"
+ recurse: true
+ file_type: any
+ register: systemd_files
+
+ - name: remove ceph systemd unit files
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ with_items: "{{ systemd_files.files }}"
+ when: ansible_facts['service_mgr'] == 'systemd'
+
+ - name: containerized related tasks
+ when: containerized_deployment | bool
+ block:
+ - name: check if it is Atomic host
+ stat: path=/run/ostree-booted
+ register: stat_ostree
+
+ - name: set fact for using Atomic host
+ set_fact:
+ is_atomic: "{{ stat_ostree.stat.exists }}"
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: remove ceph container image
+ command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ when:
+ - inventory_hostname not in groups.get(client_group_name, [])
+ or inventory_hostname == groups.get(client_group_name, []) | first
+ tags:
+ - remove_img
+
+ - name: stop docker service
+ service:
+ name: docker
+ state: stopped
+ enabled: no
+ when:
+ - not is_atomic
+ - container_binary == 'docker'
+ ignore_errors: true
+ tags:
+ - remove_docker
+
+ - name: remove docker on debian/ubuntu
+ apt:
+ name: ['docker-ce', 'docker-engine', 'docker.io', 'python-docker', 'python3-docker']
+ state: absent
+ update_cache: yes
+ autoremove: yes
+ when: ansible_facts['os_family'] == 'Debian'
+ tags:
+ - remove_docker
+
+ - name: red hat based systems tasks
+ block:
+ - name: yum related tasks on red hat
+ block:
+ - name: remove packages on redhat
+ yum:
+ name: ['epel-release', 'docker', 'python-docker-py']
+ state: absent
+
+ - name: remove package dependencies on redhat
+ command: yum -y autoremove
+ args:
+ warn: no
+
+ - name: remove package dependencies on redhat again
+ command: yum -y autoremove
+ args:
+ warn: no
+ when:
+ ansible_facts['pkg_mgr'] == "yum"
+
+ - name: dnf related tasks on red hat
+ block:
+ - name: remove docker on redhat
+ dnf:
+ name: ['docker', 'python3-docker']
+ state: absent
+
+ - name: remove package dependencies on redhat
+ command: dnf -y autoremove
+ args:
+ warn: no
+
+ - name: remove package dependencies on redhat again
+ command: dnf -y autoremove
+ args:
+ warn: no
+ when:
+ ansible_facts['pkg_mgr'] == "dnf"
+ when:
+ ansible_facts['os_family'] == 'RedHat' and
+ not is_atomic
+ tags:
+ - remove_docker
+
+ - name: find any service-cid file left
+ find:
+ paths: /run
+ patterns:
+ - "ceph-*.service-cid"
+ - "rbd-target-api.service-cid"
+ - "rbd-target-gw.service-cid"
+ - "tcmu-runner.service-cid"
+ - "node_exporter.service-cid"
+ - "prometheus.service-cid"
+ - "grafana-server.service-cid"
+ - "alertmanager.service-cid"
+ register: service_cid_files
+
+ - name: rm any service-cid file
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ with_items: "{{ service_cid_files.files }}"
+
+
+- name: purge ceph directories
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - nfss
+ - mgrs
+ - clients
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: purge ceph directories - containerized deployments
+ when: containerized_deployment | bool
+ block:
+ - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/ceph
+ - /var/log/ceph
+ - /run/ceph
+ - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
+
+ - name: remove ceph data
+ shell: rm -rf /var/lib/ceph/* # noqa 302
+ changed_when: false
+
+ # (todo): remove this when we are able to manage docker
+ # service on atomic host.
+ - name: remove docker data
+ shell: rm -rf /var/lib/docker/* # noqa 302
+ when: not is_atomic | bool
+ tags:
+ - remove_docker
+
+
+- name: purge fetch directory
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: set fetch_directory value if not set
+ set_fact:
+ fetch_directory: "fetch/"
+ when: fetch_directory is not defined
+
+ - name: purge fetch directory for localhost
+ file:
+ path: "{{ fetch_directory | default('fetch/') }}"
+ state: absent
--- /dev/null
+purge-cluster.yml
\ No newline at end of file
--- /dev/null
+---
+# This playbook purges the Ceph MGR Dashboard and Monitoring
+# (alertmanager/prometheus/grafana/node-exporter) stack.
+# It removes: packages, configuration files and ALL THE DATA
+#
+# Use it like this:
+# ansible-playbook purge-dashboard.yml
+# Prompts for confirmation to purge, defaults to no and
+# doesn't purge anything. Answering 'yes' purges the dashboard and
+# monitoring stack.
+#
+# ansible-playbook -e ireallymeanit=yes|no purge-dashboard.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+- name: confirm whether user really meant to purge the dashboard
+ hosts: localhost
+ gather_facts: false
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to purge the dashboard?
+ default: 'no'
+ private: no
+ tasks:
+ - name: exit playbook, if user did not mean to purge dashboard
+ fail:
+ msg: >
+ "Exiting purge-dashboard playbook, dashboard was NOT purged.
+ To purge the dashboard, either say 'yes' on the prompt or
+        use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: import_role ceph-defaults
+ import_role:
+ name: ceph-defaults
+
+ - name: check if a legacy grafana-server group exists
+ import_role:
+ name: ceph-facts
+ tasks_from: convert_grafana_server_group_name.yml
+ when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
+
+
+- name: gather facts on all hosts
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ client_group_name|default('clients') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ monitoring_group_name | default('monitoring') }}"
+ become: true
+ tasks:
+    - debug: msg="gather facts on all Ceph hosts for later reference"
+
+- name: purge node exporter
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ client_group_name|default('clients') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ monitoring_group_name | default('monitoring') }}"
+ gather_facts: false
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: disable node_exporter service
+ service:
+ name: node_exporter
+ state: stopped
+ enabled: no
+ failed_when: false
+
+ - name: remove node_exporter service files
+ file:
+ name: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/node_exporter.service
+ - /run/node_exporter.service-cid
+
+ - name: remove node-exporter image
+ command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
+ changed_when: false
+ failed_when: false
+
+- name: purge ceph monitoring
+ hosts: "{{ monitoring_group_name | default('monitoring') }}"
+ gather_facts: false
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: stop services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+ loop:
+ - alertmanager
+ - prometheus
+ - grafana-server
+
+ - name: remove systemd service files
+ file:
+ name: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/alertmanager.service
+ - /etc/systemd/system/prometheus.service
+ - /etc/systemd/system/grafana-server.service
+ - /run/alertmanager.service-cid
+ - /run/prometheus.service-cid
+ - /run/grafana-server.service-cid
+
+ - name: remove ceph dashboard container images
+ command: "{{ container_binary }} rmi {{ item }}"
+ loop:
+ - "{{ alertmanager_container_image }}"
+ - "{{ prometheus_container_image }}"
+ - "{{ grafana_container_image }}"
+ changed_when: false
+ failed_when: false
+
+ - name: remove ceph-grafana-dashboards package on RedHat or SUSE
+ package:
+ name: ceph-grafana-dashboards
+ state: absent
+ when:
+ - not containerized_deployment | bool
+ - ansible_facts['os_family'] in ['RedHat', 'Suse']
+
+ - name: remove data
+ file:
+ name: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ alertmanager_conf_dir }}"
+ - "{{ prometheus_conf_dir }}"
+ - /etc/grafana
+ - "{{ alertmanager_data_dir }}"
+ - "{{ prometheus_data_dir }}"
+ - /var/lib/grafana
+
+- name: purge ceph dashboard
+ hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}"
+ gather_facts: false
+ become: true
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: remove the dashboard admin user
+ ceph_dashboard_user:
+ name: "{{ dashboard_admin_user }}"
+ cluster: "{{ cluster }}"
+ state: absent
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: remove radosgw system user
+ radosgw_user:
+ name: "{{ dashboard_rgw_api_user_id }}"
+ cluster: "{{ cluster }}"
+ state: absent
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: groups.get(rgw_group_name, []) | length > 0
+
+ - name: disable mgr dashboard and prometheus modules
+ ceph_mgr_module:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ state: disable
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ loop:
+ - dashboard
+ - prometheus
+
+ - name: remove TLS certificate and key files
+ file:
+ name: "/etc/ceph/ceph-dashboard.{{ item }}"
+ state: absent
+ loop:
+ - crt
+ - key
+ when: dashboard_protocol == "https"
+
+ - name: remove ceph-mgr-dashboard package
+ package:
+ name: ceph-mgr-dashboard
+ state: absent
+ when: not containerized_deployment | bool
--- /dev/null
+---
+
+- name: Confirm removal of the iSCSI gateway configuration
+ hosts: localhost
+
+ vars_prompt:
+ - name: purge_config
+ prompt: Which configuration elements should be purged? (all, lio or abort)
+ default: 'abort'
+ private: no
+
+ tasks:
+ - name: Exit playbook if user aborted the purge
+ fail:
+ msg: >
+ "You have aborted the purge of the iSCSI gateway configuration"
+ when: purge_config == 'abort'
+
+ - name: set_fact igw_purge_type
+ set_fact:
+ igw_purge_type: "{{ purge_config }}"
+
+- name: stopping the gateways
+ hosts:
+ - iscsigws
+ become: yes
+ vars:
+ - igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
+
+ tasks:
+ - name: stopping and disabling iscsi daemons
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ with_items:
+ - rbd-target-gw
+ - rbd-target-api
+ - tcmu-runner
+
+- name: removing the gateway configuration
+ hosts:
+ - iscsigws
+ become: yes
+ vars:
+ - igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
+
+ tasks:
+ - name: igw_purge | deleting configured rbd devices
+ igw_purge: mode="disks"
+ when: igw_purge_type == 'all'
+ run_once: true
+
+ - name: igw_purge | purging the gateway configuration
+ igw_purge: mode="gateway"
+ run_once: true
+
+ - name: restart and enable iscsi daemons
+ when: igw_purge_type == 'lio'
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - tcmu-runner
+ - rbd-target-api
+ - rbd-target-gw
+
+- name: remove the gateways from the ceph dashboard
+ hosts: mons
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: iscsi gateways with ceph dashboard
+ when: dashboard_enabled | bool
+ run_once: true
+ block:
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: get iscsi gateway list
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json"
+ changed_when: false
+ register: gateways
+
+ - name: remove iscsi gateways
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}"
+ with_items: '{{ (gateways.stdout | from_json)["gateways"] }}'
--- /dev/null
+# This example playbook is used to add rgw users and buckets
+#
+# This example is run on your local machine
+#
+# Ensure that your local machine can reach the RGW endpoint of your cluster
+#
+# You will need to update the following vars
+#
+# rgw_host
+# port
+# admin_access_key
+# admin_secret_key
+#
+# Additionally modify the users list and buckets list to create the
+# users and buckets you want
+#
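+# Example invocation (the playbook file name is an assumption):
+#   ansible-playbook rgw-add-users-buckets.yml
+#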
+- name: add rgw users and buckets
+ connection: local
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: add rgw users and buckets
+ ceph_add_users_buckets:
+ rgw_host: '172.20.0.2'
+ port: 8000
+ admin_access_key: '8W56BITCSX27CD555Z5B'
+ admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20'
+ users:
+ - username: 'test1'
+ fullname: 'tester'
+ email: 'dan1@email.com'
+ maxbucket: 666
+ suspend: false
+ autogenkey: false
+ accesskey: 'B3AR4Q33L59YV56A9A2F'
+ secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+ userquota: true
+ usermaxsize: '1000'
+ usermaxobjects: 3
+ bucketquota: true
+ bucketmaxsize: '1000'
+ bucketmaxobjects: 3
+ - username: 'test2'
+ fullname: 'tester'
+ buckets:
+ - bucket: 'bucket1'
+ user: 'test2'
+ - bucket: 'bucket2'
+ user: 'test1'
+ - bucket: 'bucket3'
+ user: 'test1'
+ - bucket: 'bucket4'
+ user: 'test1'
+ - bucket: 'bucket5'
+ user: 'test1'
+ - bucket: 'bucket6'
+ user: 'test2'
+ - bucket: 'bucket7'
+ user: 'test2'
+ - bucket: 'bucket8'
+ user: 'test2'
+ - bucket: 'bucket9'
+ user: 'test2'
+ - bucket: 'bucket10'
+ user: 'test2'
--- /dev/null
+---
+# This playbook does a rolling update for all the Ceph services
+#
+# The value of 'serial:' adjusts the number of servers to be updated simultaneously.
+# We recommend a value of 1, which means hosts of a group (e.g: monitor) will be
+# upgraded one by one. It is really crucial for the update process to happen
+# in a serialized fashion. DO NOT CHANGE THIS VALUE.
+#
+#
+# If you run a Ceph community version, you have to change the ceph_stable_release variable to the new release
+#
+# If you run Red Hat Ceph Storage and are doing a **major** update (e.g. from 2 to 3), you have to change ceph_rhcs_version to the new version
+#
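+#
+# Example of a non-interactive run (the inventory file name is an assumption):
+#   ansible-playbook -i hosts rolling_update.yml -e ireallymeanit=yes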
+
+- name: confirm whether user really meant to upgrade the cluster
+ hosts: localhost
+ tags: always
+ become: false
+ gather_facts: false
+ vars:
+ - mgr_group_name: mgrs
+
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to upgrade the cluster?
+ default: 'no'
+ private: no
+
+ tasks:
+ - name: import_role ceph-defaults
+ import_role:
+ name: ceph-defaults
+
+ - name: exit playbook, if user did not mean to upgrade cluster
+ fail:
+ msg: >
+ "Exiting rolling_update.yml playbook, cluster was NOT upgraded.
+ To upgrade the cluster, either say 'yes' on the prompt or
+ use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: check if a legacy grafana-server group exists
+ import_role:
+ name: ceph-facts
+ tasks_from: convert_grafana_server_group_name.yml
+ when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
+
+
+- name: gather facts and check the init system
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ client_group_name|default('clients') }}"
+ - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ - "{{ monitoring_group_name|default('monitoring') }}"
+ tags: always
+ any_errors_fatal: True
+ become: True
+ gather_facts: False
+ vars:
+ delegate_facts_host: True
+ tasks:
+    - debug: msg="gather facts on all Ceph hosts for later reference"
+
+ - import_role:
+ name: ceph-defaults
+
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
+ run_once: true
+ when: delegate_facts_host | bool
+
+ - name: set_fact rolling_update
+ set_fact:
+ rolling_update: true
+
+ - import_role:
+ name: ceph-facts
+
+ - import_role:
+ name: ceph-infra
+ tags: ceph_infra
+
+ - import_role:
+ name: ceph-validate
+
+ - import_role:
+ name: ceph-container-engine
+ when:
+ - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first)
+ - (containerized_deployment | bool) or (dashboard_enabled | bool)
+
+ - import_role:
+ name: ceph-container-common
+ tasks_from: registry
+ when:
+ - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first)
+ - (containerized_deployment | bool) or (dashboard_enabled | bool)
+ - ceph_docker_registry_auth | bool
+
+ - name: check ceph release in container image
+ when:
+ - groups.get(mon_group_name, []) | length > 0
+ - containerized_deployment | bool
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ block:
+ - name: get the ceph release being deployed
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} --version"
+ register: ceph_version
+ changed_when: false
+
+ - name: check ceph release being deployed
+ fail:
+ msg: "This version of ceph-ansible is intended for upgrading to Ceph Pacific only."
+ when: "'pacific' not in ceph_version.stdout.split()"
+
+
+- name: upgrade ceph mon cluster
+ tags: mons
+ vars:
+ health_mon_check_retries: 5
+ health_mon_check_delay: 15
+ upgrade_ceph_packages: True
+ hosts: "{{ mon_group_name|default('mons') }}"
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+ - name: upgrade ceph mon cluster
+ block:
+ - name: remove ceph aliases
+ file:
+ path: /etc/profile.d/ceph-aliases.sh
+ state: absent
+ when: containerized_deployment | bool
+
+ - name: set mon_host_count
+ set_fact:
+ mon_host_count: "{{ groups[mon_group_name] | length }}"
+
+ - name: fail when less than three monitors
+ fail:
+ msg: "Upgrade of cluster with less than three monitors is not supported."
+ when: mon_host_count | int < 3
+
+ - name: select a running monitor
+ set_fact:
+ mon_host: "{{ groups[mon_group_name] | difference([inventory_hostname]) | last }}"
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
+ - block:
+ - name: get ceph cluster status
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health -f json"
+ register: check_cluster_health
+ delegate_to: "{{ mon_host }}"
+
+ - block:
+ - name: display ceph health detail
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail"
+ delegate_to: "{{ mon_host }}"
+
+ - name: fail if cluster isn't in an acceptable state
+ fail:
+ msg: "cluster is not in an acceptable state!"
+ when: (check_cluster_health.stdout | from_json).status == 'HEALTH_ERR'
+
+ - name: get the ceph quorum status
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
+ register: check_quorum_status
+ delegate_to: "{{ mon_host }}"
+
+ - name: fail if the cluster quorum isn't in an acceptable state
+ fail:
+ msg: "cluster quorum is not in an acceptable state!"
+ when: (check_quorum_status.stdout | from_json).quorum | length != groups[mon_group_name] | length
+ when: inventory_hostname == groups[mon_group_name] | first
+
+ - name: ensure /var/lib/ceph/bootstrap-rbd-mirror is present
+ file:
+ path: /var/lib/ceph/bootstrap-rbd-mirror
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: '755'
+ state: directory
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups[mon_group_name] }}"
+ when:
+ - cephx | bool
+ - inventory_hostname == groups[mon_group_name][0]
+
+ - name: create potentially missing keys (rbd and rbd-mirror)
+ ceph_key:
+ name: "client.{{ item.0 }}"
+ dest: "/var/lib/ceph/{{ item.0 }}/"
+ caps:
+ mon: "allow profile {{ item.0 }}"
+ cluster: "{{ cluster }}"
+ delegate_to: "{{ item.1 }}"
+ with_nested:
+ - ['bootstrap-rbd', 'bootstrap-rbd-mirror']
+ - "{{ groups[mon_group_name] }}" # so the key goes on all the nodes
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ when:
+ - cephx | bool
+ - inventory_hostname == groups[mon_group_name][0]
+
+ # NOTE: we mask the service so the RPM can't restart it
+ # after the package gets upgraded
+ - name: stop ceph mon
+ systemd:
+ name: ceph-mon@{{ item }}
+ state: stopped
+ enabled: no
+ masked: yes
+ with_items:
+ - "{{ ansible_facts['hostname'] }}"
+ - "{{ ansible_facts['fqdn'] }}"
+
+ # only mask the service for mgr because it must be upgraded
+ # after ALL monitors, even when collocated
+ - name: mask the mgr service
+ systemd:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ masked: yes
+ when: inventory_hostname in groups[mgr_group_name] | default([])
+ or groups[mgr_group_name] | default([]) | length == 0
+
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-mon
+
+ - name: start ceph mgr
+ systemd:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ state: started
+ enabled: yes
+ masked: no
+ when: inventory_hostname in groups[mgr_group_name] | default([])
+ or groups[mgr_group_name] | default([]) | length == 0
+
+ - name: import_role ceph-facts
+ import_role:
+ name: ceph-facts
+ tasks_from: set_monitor_address.yml
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ delegate_facts: true
+
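+      # The play runs with serial: 1, so the freshly upgraded monitor must
+      # rejoin the quorum before the playbook moves on to the next monitor.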
+ - name: non container | waiting for the monitor to join the quorum...
+ command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+ register: ceph_health_raw
+ until:
+ - ceph_health_raw.rc == 0
+ - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+ hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+ retries: "{{ health_mon_check_retries }}"
+ delay: "{{ health_mon_check_delay }}"
+ when: not containerized_deployment | bool
+
+ - name: container | waiting for the containerized monitor to join the quorum...
+ command: >
+ {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+ register: ceph_health_raw
+ until:
+ - ceph_health_raw.rc == 0
+ - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+ hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+ retries: "{{ health_mon_check_retries }}"
+ delay: "{{ health_mon_check_delay }}"
+ when: containerized_deployment | bool
+
+ rescue:
+ - name: unmask the mon service
+ systemd:
+ name: ceph-mon@{{ ansible_facts['hostname'] }}
+ enabled: yes
+ masked: no
+
+ - name: unmask the mgr service
+ systemd:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ masked: no
+ when: inventory_hostname in groups[mgr_group_name] | default([])
+ or groups[mgr_group_name] | default([]) | length == 0
+
+ - name: stop the playbook execution
+ fail:
+ msg: "There was an error during monitor upgrade. Please, check the previous task results."
+
+- name: reset mon_host
+ hosts: "{{ mon_group_name|default('mons') }}"
+ tags: always
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: reset mon_host fact
+ set_fact:
+ mon_host: "{{ groups[mon_group_name][0] }}"
+
+
+- name: upgrade ceph mgr nodes when implicitly collocated on monitors
+ vars:
+ health_mon_check_retries: 5
+ health_mon_check_delay: 15
+ upgrade_ceph_packages: True
+ hosts: "{{ mon_group_name|default('mons') }}"
+ tags: mgrs
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+ - name: upgrade mgrs when no mgr group explicitly defined in inventory
+ when: groups.get(mgr_group_name, []) | length == 0
+ block:
+ - name: stop ceph mgr
+ systemd:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ state: stopped
+ masked: yes
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-mgr
+
+- name: upgrade ceph mgr nodes
+ vars:
+ upgrade_ceph_packages: True
+ ceph_release: "{{ ceph_stable_release }}"
+ hosts: "{{ mgr_group_name|default('mgrs') }}"
+ tags: mgrs
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+ # The following task has a failed_when: false
+ # to handle the scenario where no mgr existed before the upgrade
+      # or if the cluster being upgraded predates Luminous
+ - name: stop ceph mgr
+ systemd:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ state: stopped
+ enabled: no
+ masked: yes
+ failed_when: false
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-mgr
+
+
+- name: set osd flags
+ hosts: "{{ osd_group_name | default('osds') }}"
+ tags: osds
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: set osd flags, disable autoscaler and balancer
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ block:
+ - name: get pool list
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ register: pool_list
+ changed_when: false
+ check_mode: false
+
+ - name: get balancer module status
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ register: balancer_status_update
+ run_once: true
+ changed_when: false
+ check_mode: false
+
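+      # Record each pool's current pg_autoscale_mode so the
+      # "complete osd upgrade" play can re-enable it only where it was 'on'.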
+ - name: set_fact pools_pgautoscaler_mode
+ set_fact:
+ pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
+ with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
+
+ - name: disable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ changed_when: false
+ when: (balancer_status_update.stdout | from_json)['active'] | bool
+
+ - name: disable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: false
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: set osd flags
+ ceph_osd_flag:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items:
+ - noout
+ - nodeep-scrub
+
+- name: upgrade ceph osds cluster
+ vars:
+ health_osd_check_retries: 600
+ health_osd_check_delay: 2
+ upgrade_ceph_packages: True
+ hosts: "{{ osd_group_name|default('osds') }}"
+ tags: osds
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
+ - name: get osd numbers - non container
+ shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306
+ register: osd_ids
+ changed_when: false
+
+ - name: set num_osds
+ set_fact:
+ num_osds: "{{ osd_ids.stdout_lines|default([])|length }}"
+
+ - name: set_fact container_exec_cmd_osd
+ set_fact:
+ container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: stop ceph osd
+ systemd:
+ name: ceph-osd@{{ item }}
+ state: stopped
+ enabled: no
+ masked: yes
+ with_items: "{{ osd_ids.stdout_lines }}"
+
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-osd
+
+ - name: scan ceph-disk osds with ceph-volume if deploying nautilus
+ ceph_volume_simple_scan:
+ cluster: "{{ cluster }}"
+ force: true
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ when: not containerized_deployment | bool
+
+ - name: activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus
+ ceph_volume_simple_activate:
+ cluster: "{{ cluster }}"
+ osd_all: true
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ when: not containerized_deployment | bool
+
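+    # Do not move on to the next OSD host until every PG reported by
+    # `ceph pg stat` is active+clean (checked up to health_osd_check_retries
+    # times, every health_osd_check_delay seconds).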
+ - name: waiting for clean pgs...
+ command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} pg stat --format json"
+ register: ceph_health_post
+ until: >
+ (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
+ and
+ (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | selectattr('name', 'search', '^active\\+clean') | map(attribute='num') | list | sum) == (ceph_health_post.stdout | from_json).pg_summary.num_pgs)
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ retries: "{{ health_osd_check_retries }}"
+ delay: "{{ health_osd_check_delay }}"
+
+
+- name: complete osd upgrade
+ hosts: "{{ osd_group_name | default('osds') }}"
+ tags: osds
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: unset osd flags, re-enable pg autoscaler and balancer
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ block:
+ - name: re-enable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: true
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: unset osd flags
+ ceph_osd_flag:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ state: absent
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items:
+ - noout
+ - nodeep-scrub
+
+ - name: re-enable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ changed_when: false
+ when: (balancer_status_update.stdout | from_json)['active'] | bool
+
+- name: upgrade ceph mdss cluster, deactivate all rank > 0
+ hosts: "{{ mon_group_name | default('mons') }}[0]"
+ tags: mdss
+ become: true
+ gather_facts: false
+ tasks:
+ - name: deactivate all mds rank > 0
+ when: groups.get(mds_group_name, []) | length > 0
+ block:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
+ - name: deactivate all mds rank > 0 if any
+ when: groups.get(mds_group_name, []) | length > 1
+ block:
+ - name: set max_mds 1 on ceph fs
+ ceph_fs:
+ name: "{{ cephfs }}"
+ cluster: "{{ cluster }}"
+ data: "{{ cephfs_data_pool.name }}"
+ metadata: "{{ cephfs_metadata_pool.name }}"
+ max_mds: 1
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: wait until only rank 0 is up
+ ceph_fs:
+ name: "{{ cephfs }}"
+ cluster: "{{ cluster }}"
+ state: info
+ register: wait_rank_zero
+ retries: 720
+ delay: 5
+ until: (wait_rank_zero.stdout | from_json).mdsmap.in | length == 1 and (wait_rank_zero.stdout | from_json).mdsmap.in[0] == 0
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: get name of remaining active mds
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+ changed_when: false
+ register: _mds_active_name
+
+ - name: set_fact mds_active_name
+ set_fact:
+ mds_active_name: "{{ (_mds_active_name.stdout | from_json)['filesystems'][0]['mdsmap']['info'][item.key]['name'] }}"
+ with_dict: "{{ (_mds_active_name.stdout | default('{}') | from_json).filesystems[0]['mdsmap']['info'] | default({}) }}"
+
+ - name: set_fact mds_active_host
+ set_fact:
+ mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
+ with_items: "{{ groups[mds_group_name] }}"
+ when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name
+
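+      # add_host builds in-memory groups (standby_mdss here, active_mdss below)
+      # that the following plays target; they only exist for the duration of
+      # this playbook run.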
+ - name: create standby_mdss group
+ add_host:
+ name: "{{ item }}"
+ groups: standby_mdss
+ ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}"
+ ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}"
+ with_items: "{{ groups[mds_group_name] | difference(mds_active_host) }}"
+
+ - name: stop standby ceph mds
+ systemd:
+ name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ state: stopped
+ enabled: no
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups['standby_mdss'] }}"
+ when: groups['standby_mdss'] | default([]) | length > 0
+
+ # dedicated task for masking systemd unit
+ # somehow, having a single task doesn't work in containerized context
+ - name: mask systemd units for standby ceph mds
+ systemd:
+ name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ masked: yes
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups['standby_mdss'] }}"
+ when: groups['standby_mdss'] | default([]) | length > 0
+
+      - name: wait until all standby mds daemons are stopped
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+ changed_when: false
+ register: wait_standbys_down
+ retries: 300
+ delay: 5
+ until: (wait_standbys_down.stdout | from_json).standbys | length == 0
+
+ - name: create active_mdss group
+ add_host:
+ name: "{{ mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0] }}"
+ groups: active_mdss
+ ansible_host: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_host'] | default(omit) }}"
+ ansible_port: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_port'] | default(omit) }}"
+
+
+- name: upgrade active mds
+ vars:
+ upgrade_ceph_packages: True
+ hosts: active_mdss
+ tags: mdss
+ become: true
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ - name: prevent restart from the packaging
+ systemd:
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
+ enabled: no
+ masked: yes
+ when: not containerized_deployment | bool
+
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-mds
+
+ - name: restart ceph mds
+ systemd:
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
+ state: restarted
+ enabled: yes
+ masked: no
+ when: not containerized_deployment | bool
+
+ - name: restart active mds
+ command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}"
+ changed_when: false
+ when: containerized_deployment | bool
+
+- name: upgrade standbys ceph mdss cluster
+ vars:
+ upgrade_ceph_packages: True
+ hosts: standby_mdss
+ tags: mdss
+ become: True
+ gather_facts: false
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ - name: prevent restarts from the packaging
+ systemd:
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
+ enabled: no
+ masked: yes
+ when: not containerized_deployment | bool
+
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-mds
+
+ - name: set max_mds
+ ceph_fs:
+ name: "{{ cephfs }}"
+ cluster: "{{ cluster }}"
+ max_mds: "{{ mds_max_mds }}"
+ data: "{{ cephfs_data_pool.name }}"
+ metadata: "{{ cephfs_metadata_pool.name }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ when: inventory_hostname == groups['standby_mdss'] | last
+
+
+- name: upgrade ceph rgws cluster
+ vars:
+ upgrade_ceph_packages: True
+ hosts: "{{ rgw_group_name|default('rgws') }}"
+ tags: rgws
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
+ - name: stop ceph rgw when upgrading from stable-3.2
+ systemd:
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
+ state: stopped
+ enabled: no
+ masked: yes
+ ignore_errors: True
+
+ - name: stop ceph rgw
+ systemd:
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
+ state: stopped
+ enabled: no
+ masked: yes
+ with_items: "{{ rgw_instances }}"
+
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-rgw
+
+
+- name: upgrade ceph rbd mirror node
+ vars:
+ upgrade_ceph_packages: True
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ tags: rbdmirrors
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+ - name: stop ceph rbd mirror
+ systemd:
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ masked: yes
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-rbd-mirror
+
+
+- name: upgrade ceph nfs node
+ vars:
+ upgrade_ceph_packages: True
+ hosts: "{{ nfs_group_name|default('nfss') }}"
+ tags: nfss
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+ # failed_when: false is here so that if we upgrade
+ # from a version of ceph that does not have nfs-ganesha
+ # then this task will not fail
+ - name: stop ceph nfs
+ systemd:
+ name: nfs-ganesha
+ state: stopped
+ enabled: no
+ masked: yes
+ failed_when: false
+ when: not containerized_deployment | bool
+
+ - name: systemd stop nfs container
+ systemd:
+ name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
+ state: stopped
+ enabled: no
+ masked: yes
+ failed_when: false
+ when:
+ - ceph_nfs_enable_service | bool
+ - containerized_deployment | bool
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-nfs
+
+
+- name: upgrade ceph iscsi gateway node
+ vars:
+ upgrade_ceph_packages: True
+ hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ tags: iscsigws
+ serial: 1
+ become: True
+ gather_facts: false
+ tasks:
+ # failed_when: false is here so that if we upgrade
+ # from a version of ceph that does not have iscsi gws
+ # then this task will not fail
+ - name: stop ceph iscsi services
+ systemd:
+ name: '{{ item }}'
+ state: stopped
+ enabled: no
+ masked: yes
+ failed_when: false
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
+ - tcmu-runner
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+ - import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-config
+ - import_role:
+ name: ceph-iscsi-gw
+
+
+- name: upgrade ceph client node
+ vars:
+ upgrade_ceph_packages: True
+ hosts: "{{ client_group_name|default('clients') }}"
+ tags: clients
+ serial: "{{ client_update_batch | default(20) }}"
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+ when: containerized_deployment | bool
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
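+    # On hosts that are only Ceph clients, ceph-container-common (and the
+    # image pull it triggers) is limited to the first host of the group;
+    # hosts that also belong to other groups run it as usual.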
+ - import_role:
+ name: ceph-container-common
+ when:
+ - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first)
+ - containerized_deployment | bool
+
+
+- name: upgrade ceph-crash daemons
+ hosts:
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
+ tags:
+ - post_upgrade
+ - crash
+ gather_facts: false
+ become: true
+ tasks:
+ - name: stop the ceph-crash service
+ systemd:
+ name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
+ state: stopped
+
+ # it needs to be done in a separate task otherwise the stop just before doesn't work.
+ - name: mask and disable the ceph-crash service
+ systemd:
+ name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
+ enabled: no
+ masked: yes
+
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-crash
+
+- name: complete upgrade
+ hosts: "{{ mon_group_name | default('mons') }}"
+ tags: post_upgrade
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: container | disallow pre-pacific OSDs and enable all new pacific-only functionality
+ command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release pacific"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: True
+ when:
+ - containerized_deployment | bool
+ - groups.get(mon_group_name, []) | length > 0
+
+ - name: non container | disallow pre-pacific OSDs and enable all new pacific-only functionality
+ command: "ceph --cluster {{ cluster }} osd require-osd-release pacific"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: True
+ when:
+ - not containerized_deployment | bool
+ - groups.get(mon_group_name, []) | length > 0
+
+- name: upgrade node-exporter
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+ - "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ - "{{ monitoring_group_name|default('monitoring') }}"
+ tags: monitoring
+ gather_facts: false
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: with dashboard configuration
+ when: dashboard_enabled | bool
+ block:
+ - name: stop node-exporter
+ service:
+ name: node_exporter
+ state: stopped
+ failed_when: false
+
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-container-engine
+ - import_role:
+ name: ceph-container-common
+ tasks_from: registry
+ when:
+ - not containerized_deployment | bool
+ - ceph_docker_registry_auth | bool
+ - import_role:
+ name: ceph-node-exporter
+
+- name: upgrade monitoring node
+ hosts: "{{ monitoring_group_name }}"
+ tags: monitoring
+ gather_facts: false
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: with dashboard configuration
+ when: dashboard_enabled | bool
+ block:
+ - name: stop monitoring services
+ service:
+ name: '{{ item }}'
+ state: stopped
+ failed_when: false
+ with_items:
+ - alertmanager
+ - prometheus
+ - grafana-server
+
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-facts
+ tasks_from: grafana
+ - import_role:
+ name: ceph-prometheus
+ - import_role:
+ name: ceph-grafana
+
+- name: upgrade ceph dashboard
+ hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}"
+ tags: monitoring
+ gather_facts: false
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: with dashboard configuration
+ when: dashboard_enabled | bool
+ block:
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-facts
+ tasks_from: grafana
+ - import_role:
+ name: ceph-dashboard
+
+- name: switch any existing crush buckets to straw2
+ hosts: "{{ mon_group_name | default('mons') }}[0]"
+ tags: post_upgrade
+ become: true
+ any_errors_fatal: true
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: set_fact ceph_cmd
+ set_fact:
+ ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
+
+ - name: backup the crushmap
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd getcrushmap -o /etc/ceph/{{ cluster }}-crushmap"
+ changed_when: false
+
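+    # Attempt the conversion inside a block so that, on failure, the rescue
+    # section restores the crushmap backup taken above before aborting.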
+ - block:
+ - name: switch crush buckets to straw2
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd crush set-all-straw-buckets-to-straw2"
+ changed_when: false
+ rescue:
+ - name: restore the crushmap
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd setcrushmap -i /etc/ceph/{{ cluster }}-crushmap"
+ changed_when: false
+
+ - name: inform that the switch to straw2 buckets failed
+ fail:
+ msg: >
+ "An attempt to switch to straw2 bucket was made but failed.
+ Check the cluster status."
+
+ - name: remove crushmap backup
+ file:
+ path: /etc/ceph/{{ cluster }}-crushmap
+ state: absent
+
+- name: show ceph status
+ hosts: "{{ mon_group_name|default('mons') }}"
+ tags: always
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: set_fact container_exec_cmd_status
+ set_fact:
+ container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: show ceph status
+ command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
+ changed_when: false
+ run_once: True
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: show all daemons version
+ command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
+ run_once: True
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
--- /dev/null
+---
+# This playbook removes the Ceph MDS from your cluster.
+#
+# Use it like this:
+# ansible-playbook shrink-mds.yml -e mds_to_kill=ceph-mds01
+# Prompts for confirmation to shrink, defaults to no and
+# doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-mds.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+- name: gather facts and check the init system
+ hosts:
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ become: true
+ tasks:
+ - debug:
+ msg: gather facts on all Ceph hosts for following reference
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+- name: perform checks, remove mds and print cluster health
+ hosts: "{{ groups[mon_group_name][0] }}"
+ become: true
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to shrink the cluster?
+ default: 'no'
+ private: no
+ pre_tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: exit playbook, if no mds was given
+ when: mds_to_kill is not defined
+ fail:
+ msg: >
+ mds_to_kill must be declared.
+ Exiting shrink-cluster playbook, no MDS was removed. On the command
+ line when invoking the playbook, you can use
+ "-e mds_to_kill=ceph-mds1" argument. You can only remove a single
+ MDS each time the playbook runs."
+
+ - name: exit playbook, if the mds is not part of the inventory
+ when: mds_to_kill not in groups[mds_group_name]
+ fail:
+ msg: "It seems that the host given is not part of your inventory,
+ please make sure it is."
+
+ - name: exit playbook, if user did not mean to shrink cluster
+ when: ireallymeanit != 'yes'
+ fail:
+ msg: "Exiting shrink-mds playbook, no mds was removed.
+ To shrink the cluster, either say 'yes' on the prompt or
+           use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+
+ - name: set_fact container_exec_cmd for mon0
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: exit playbook, if can not connect to the cluster
+ command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ changed_when: false
+ register: ceph_health
+ until: ceph_health is succeeded
+ retries: 5
+ delay: 2
+
+ - name: set_fact mds_to_kill_hostname
+ set_fact:
+ mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
+
+ tasks:
+ # get rid of this as soon as "systemctl stop ceph-msd@$HOSTNAME" also
+ # removes the MDS from the FS map.
+ - name: exit mds when containerized deployment
+ command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
+ changed_when: false
+ when: containerized_deployment | bool
+
+ - name: get ceph status
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+ register: ceph_status
+ changed_when: false
+
+ - name: set_fact current_max_mds
+ set_fact:
+ current_max_mds: "{{ (ceph_status.stdout | from_json)['fsmap']['max'] }}"
+
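+    # Removing this MDS must still leave enough daemons (up + standby - 1) to
+    # satisfy the filesystem's current max_mds while more than one rank is up.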
+ - name: fail if removing that mds node wouldn't satisfy max_mds anymore
+ fail:
+ msg: "Can't remove more mds as it won't satisfy current max_mds setting"
+ when:
+ - ((((ceph_status.stdout | from_json)['fsmap']['up'] | int) + ((ceph_status.stdout | from_json)['fsmap']['up:standby'] | int)) - 1) < current_max_mds | int
+ - (ceph_status.stdout | from_json)['fsmap']['up'] | int > 1
+
+ - name: stop mds service and verify it
+ block:
+ - name: stop mds service
+ service:
+ name: ceph-mds@{{ mds_to_kill_hostname }}
+ state: stopped
+ enabled: no
+ delegate_to: "{{ mds_to_kill }}"
+ failed_when: false
+
+ - name: ensure that the mds is stopped
+ command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa 303
+ register: mds_to_kill_status
+ failed_when: mds_to_kill_status.rc == 0
+ delegate_to: "{{ mds_to_kill }}"
+ retries: 5
+ delay: 2
+
+ - name: fail if the mds is reported as active or standby
+ block:
+ - name: get new ceph status
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+ register: ceph_status
+
+ - name: get active mds nodes list
+ set_fact:
+ active_mdss: "{{ active_mdss | default([]) + [item.name] }}"
+ with_items: "{{ (ceph_status.stdout | from_json)['fsmap']['by_rank'] }}"
+
+ - name: get ceph fs dump status
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+ register: ceph_fs_status
+
+ - name: create a list of standby mdss
+ set_fact:
+            standby_mdss: "{{ (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
+
+ - name: fail if mds just killed is being reported as active or standby
+ fail:
+ msg: "mds node {{ mds_to_kill }} still up and running."
+ when:
+ - (mds_to_kill in active_mdss | default([])) or
+ (mds_to_kill in standby_mdss | default([]))
+
+ - name: delete the filesystem when killing last mds
+ ceph_fs:
+ name: "{{ cephfs }}"
+ cluster: "{{ cluster }}"
+ state: absent
+ when:
+ - (ceph_status.stdout | from_json)['fsmap']['up'] | int == 0
+ - (ceph_status.stdout | from_json)['fsmap']['up:standby'] | int == 0
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: purge mds store
+ file:
+ path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_to_kill_hostname }}
+ state: absent
+ delegate_to: "{{ mds_to_kill }}"
+
+ post_tasks:
+ - name: show ceph health
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ changed_when: false
\ No newline at end of file
--- /dev/null
+---
+# This playbook shrinks the Ceph manager from your cluster
+#
+# Use it like this:
+# ansible-playbook shrink-mgr.yml -e mgr_to_kill=ceph-mgr1
+# Prompts for confirmation to shrink, defaults to no and
+# doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-mgr.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+
+- name: gather facts and check the init system
+ hosts:
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
+ become: true
+ tasks:
+ - debug:
+ msg: gather facts on all Ceph hosts for following reference
+
+- name: confirm if user really meant to remove manager from the ceph cluster
+ hosts: "{{ groups[mon_group_name][0] }}"
+ become: true
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to shrink the cluster?
+ default: 'no'
+ private: no
+ pre_tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: set_fact container_exec_cmd
+ when: containerized_deployment | bool
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+
+ - name: exit playbook, if can not connect to the cluster
+ command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ register: ceph_health
+ changed_when: false
+ until: ceph_health is succeeded
+ retries: 5
+ delay: 2
+
+ - name: get total number of mgrs in cluster
+ block:
+ - name: save mgr dump output
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
+ register: mgr_dump
+
+ - name: get active and standbys mgr list
+ set_fact:
+ active_mgr: "{{ [mgr_dump.stdout | from_json] | map(attribute='active_name') | list }}"
+ standbys_mgr: "{{ (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
+
+ - name: exit playbook, if there's no standby manager
+ fail:
+ msg: "You are about to shrink the only manager present in the cluster."
+ when: standbys_mgr | length | int < 1
+
+ - name: exit playbook, if no manager was given
+ fail:
+ msg: "mgr_to_kill must be declared
+ Exiting shrink-cluster playbook, no manager was removed.
+ On the command line when invoking the playbook, you can use
+ -e mgr_to_kill=ceph-mgr01 argument. You can only remove a single
+ manager each time the playbook runs."
+ when: mgr_to_kill is not defined
+
+ - name: exit playbook, if user did not mean to shrink cluster
+ fail:
+ msg: "Exiting shrink-mgr playbook, no manager was removed.
+ To shrink the cluster, either say 'yes' on the prompt or
+           use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: set_fact mgr_to_kill_hostname
+ set_fact:
+ mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
+
+ - name: exit playbook, if the selected manager is not present in the cluster
+ fail:
+ msg: "It seems that the host given is not present in the cluster."
+ when:
+ - mgr_to_kill_hostname not in active_mgr
+ - mgr_to_kill_hostname not in standbys_mgr
+
+ tasks:
+ - name: stop manager services and verify it
+ block:
+ - name: stop manager service
+ service:
+ name: ceph-mgr@{{ mgr_to_kill_hostname }}
+ state: stopped
+ enabled: no
+ delegate_to: "{{ mgr_to_kill }}"
+ failed_when: false
+
+ - name: ensure that the mgr is stopped
+ command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa 303
+ register: mgr_to_kill_status
+ failed_when: mgr_to_kill_status.rc == 0
+ delegate_to: "{{ mgr_to_kill }}"
+ retries: 5
+ delay: 2
+
+ - name: fail if the mgr is reported in ceph mgr dump
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
+ register: mgr_dump
+ changed_when: false
+ failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
+ until: mgr_to_kill_hostname not in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
+ retries: 12
+ delay: 10
+
+ - name: purge manager store
+ file:
+ path: /var/lib/ceph/mgr/{{ cluster }}-{{ mgr_to_kill_hostname }}
+ state: absent
+ delegate_to: "{{ mgr_to_kill }}"
+
+ post_tasks:
+ - name: show ceph health
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ changed_when: false
\ No newline at end of file
--- /dev/null
+---
+# This playbook shrinks the Ceph monitors from your cluster
+# It can remove a Ceph monitor from the cluster and ALL ITS DATA
+#
+# Use it like this:
+# ansible-playbook shrink-mon.yml -e mon_to_kill=ceph-mon01
+# Prompts for confirmation to shrink, defaults to no and
+# doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-mon.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+
+- name: gather facts and check the init system
+
+ hosts: "{{ mon_group_name|default('mons') }}"
+
+ become: true
+
+ tasks:
+ - debug: msg="gather facts on all Ceph hosts for following reference"
+
+- name: confirm whether user really meant to remove monitor from the ceph cluster
+ hosts: "{{ groups[mon_group_name][0] }}"
+ become: true
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to shrink the cluster?
+ default: 'no'
+ private: no
+ vars:
+ mon_group_name: mons
+
+ pre_tasks:
+ - name: exit playbook, if only one monitor is present in cluster
+ fail:
+ msg: "You are about to shrink the only monitor present in the cluster.
+ If you really want to do that, please use the purge-cluster playbook."
+ when: groups[mon_group_name] | length | int == 1
+
+ - name: exit playbook, if no monitor was given
+ fail:
+ msg: "mon_to_kill must be declared
+ Exiting shrink-cluster playbook, no monitor was removed.
+ On the command line when invoking the playbook, you can use
+ -e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs."
+ when: mon_to_kill is not defined
+
+ - name: exit playbook, if the monitor is not part of the inventory
+ fail:
+ msg: "It seems that the host given is not part of your inventory, please make sure it is."
+ when: mon_to_kill not in groups[mon_group_name]
+
+ - name: exit playbook, if user did not mean to shrink cluster
+ fail:
+ msg: "Exiting shrink-mon playbook, no monitor was removed.
+ To shrink the cluster, either say 'yes' on the prompt or
+           use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ tasks:
+ - name: pick a monitor different than the one we want to remove
+ set_fact:
+ mon_host: "{{ item }}"
+ with_items: "{{ groups[mon_group_name] }}"
+ when: item != mon_to_kill
+
+ - name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: exit playbook, if can not connect to the cluster
+ command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
+ register: ceph_health
+ changed_when: false
+ until: ceph_health.stdout.find("HEALTH") > -1
+ delegate_to: "{{ mon_host }}"
+ retries: 5
+ delay: 2
+
+ - name: set_fact mon_to_kill_hostname
+ set_fact:
+ mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
+
+ - name: stop monitor service(s)
+ service:
+ name: ceph-mon@{{ mon_to_kill_hostname }}
+ state: stopped
+ enabled: no
+ delegate_to: "{{ mon_to_kill }}"
+ failed_when: false
+
+ - name: purge monitor store
+ file:
+ path: /var/lib/ceph/mon/{{ cluster }}-{{ mon_to_kill_hostname }}
+ state: absent
+ delegate_to: "{{ mon_to_kill }}"
+
+ - name: remove monitor from the quorum
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
+ changed_when: false
+ failed_when: false
+ delegate_to: "{{ mon_host }}"
+
+ post_tasks:
+ - name: verify the monitor is out of the cluster
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
+ delegate_to: "{{ mon_host }}"
+ changed_when: false
+ failed_when: false
+ register: result
+ until: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names']
+ retries: 2
+ delay: 10
+
+ - name: please remove the monitor from your ceph configuration file
+ debug:
+ msg: "The monitor has been successfully removed from the cluster.
+ Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
+ run_once: true
+ when: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names']
+
+ - name: fail if monitor is still part of the cluster
+ fail:
+ msg: "Monitor appears to still be part of the cluster, please check what happened."
+ run_once: true
+ when: mon_to_kill_hostname in (result.stdout | from_json)['quorum_names']
+
+ - name: show ceph health
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
+ delegate_to: "{{ mon_host }}"
+ changed_when: false
+
+ - name: show ceph mon status
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
+ delegate_to: "{{ mon_host }}"
+ changed_when: false
\ No newline at end of file
--- /dev/null
+---
+# This playbook shrinks Ceph OSDs that have been created with ceph-volume.
+# It can remove any number of OSD(s) from the cluster and ALL THEIR DATA
+#
+# Use it like this:
+# ansible-playbook shrink-osd.yml -e osd_to_kill=0,2,6
+# Prompts for confirmation to shrink, defaults to no and
+# doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-osd.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+- name: gather facts and check the init system
+
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+
+ become: True
+ tasks:
+ - debug: msg="gather facts on all Ceph hosts for following reference"
+
+- name: confirm whether user really meant to remove osd(s) from the cluster
+
+ hosts: "{{ groups[mon_group_name][0] }}"
+
+ become: true
+
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to shrink the cluster?
+ default: 'no'
+ private: no
+
+ vars:
+ mon_group_name: mons
+ osd_group_name: osds
+
+ pre_tasks:
+ - name: exit playbook, if user did not mean to shrink cluster
+ fail:
+ msg: "Exiting shrink-osd playbook, no osd(s) was/were removed..
+ To shrink the cluster, either say 'yes' on the prompt or
+           use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: exit playbook, if no osd(s) was/were given
+ fail:
+ msg: "osd_to_kill must be declared
+ Exiting shrink-osd playbook, no OSD(s) was/were removed.
+ On the command line when invoking the playbook, you can use
+ -e osd_to_kill=0,1,2,3 argument."
+ when: osd_to_kill is not defined
+
+ - name: check the osd ids passed have the correct format
+ fail:
+ msg: "The id {{ item }} has wrong format, please pass the number only"
+ with_items: "{{ osd_to_kill.split(',') }}"
+ when: not item is regex("^\d+$")
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ post_tasks:
+ - name: set_fact container_exec_cmd build docker exec command (containerized)
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: exit playbook, if can not connect to the cluster
+ command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
+ register: ceph_health
+ changed_when: false
+ until: ceph_health.stdout.find("HEALTH") > -1
+ retries: 5
+ delay: 2
+
+ - name: find the host(s) where the osd(s) is/are running on
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
+ changed_when: false
+ with_items: "{{ osd_to_kill.split(',') }}"
+ register: find_osd_hosts
+
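+    # Build a list of [crush host, osd fsid, osd id] entries from the
+    # `ceph osd find` output gathered above.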
+ - name: set_fact osd_hosts
+ set_fact:
+ osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item ] ] }}"
+ with_items: "{{ find_osd_hosts.results }}"
+
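+    # Translate each crush host name into its inventory hostname, giving
+    # [inventory host, osd fsid, osd id] entries used by the following tasks.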
+ - name: set_fact _osd_hosts
+ set_fact:
+ _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}"
+ with_nested:
+ - "{{ groups.get(osd_group_name) }}"
+ - "{{ osd_hosts }}"
+ when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
+
+ - name: set_fact host_list
+ set_fact:
+ host_list: "{{ host_list | default([]) | union([item.0]) }}"
+ loop: "{{ _osd_hosts }}"
+
+ - name: get ceph-volume lvm list data
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: list
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _lvm_list_data
+ delegate_to: "{{ item }}"
+ loop: "{{ host_list }}"
+
+ - name: set_fact _lvm_list
+ set_fact:
+ _lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
+ with_items: "{{ _lvm_list_data.results }}"
+
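+    # OSDs present in _lvm_list were deployed with ceph-volume lvm; any OSD id
+    # missing from it is assumed to be a ceph-disk OSD and is handled through
+    # the /etc/ceph/osd simple-scan files below.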
+ - name: refresh /etc/ceph/osd files non containerized_deployment
+ ceph_volume_simple_scan:
+ cluster: "{{ cluster }}"
+ force: true
+ delegate_to: "{{ item }}"
+ loop: "{{ host_list }}"
+ when: not containerized_deployment | bool
+
+ - name: refresh /etc/ceph/osd files containerized_deployment
+ command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
+ changed_when: false
+ delegate_to: "{{ item.0 }}"
+ loop: "{{ _osd_hosts }}"
+ when: containerized_deployment | bool
+
+ - name: find /etc/ceph/osd files
+ find:
+ paths: /etc/ceph/osd
+ pattern: "{{ item.2 }}-*"
+ register: ceph_osd_data
+ delegate_to: "{{ item.0 }}"
+ loop: "{{ _osd_hosts }}"
+ when: item.2 not in _lvm_list.keys()
+
+ - name: slurp ceph osd files content
+ slurp:
+ src: "{{ item['files'][0]['path'] }}"
+ delegate_to: "{{ item.item.0 }}"
+ register: ceph_osd_files_content
+ loop: "{{ ceph_osd_data.results }}"
+ when:
+ - item.skipped is undefined
+ - item.matched > 0
+
+ - name: set_fact ceph_osd_files_json
+ set_fact:
+ ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}"
+ with_items: "{{ ceph_osd_files_content.results }}"
+ when: item.skipped is undefined
+
+ - name: mark osd(s) out of the cluster
+ ceph_osd:
+ ids: "{{ osd_to_kill.split(',') }}"
+ cluster: "{{ cluster }}"
+ state: out
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ run_once: true
+
+ - name: stop osd(s) service
+ service:
+ name: ceph-osd@{{ item.2 }}
+ state: stopped
+ enabled: no
+ loop: "{{ _osd_hosts }}"
+ delegate_to: "{{ item.0 }}"
+
+ - name: umount osd lockbox
+ mount:
+ path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
+ state: absent
+ loop: "{{ _osd_hosts }}"
+ delegate_to: "{{ item.0 }}"
+ when:
+ - not containerized_deployment | bool
+ - item.2 not in _lvm_list.keys()
+ - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
+ - ceph_osd_data_json[item.2]['data']['uuid'] is defined
+
+ - name: umount osd data
+ mount:
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
+ state: absent
+ loop: "{{ _osd_hosts }}"
+ delegate_to: "{{ item.0 }}"
+ when: not containerized_deployment | bool
+
+ - name: get parent device for data partition
+ command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
+ register: parent_device_data_part
+ loop: "{{ _osd_hosts }}"
+ delegate_to: "{{ item.0 }}"
+ when:
+ - item.2 not in _lvm_list.keys()
+ - ceph_osd_data_json[item.2]['data']['path'] is defined
+
+ - name: add pkname information in ceph_osd_data_json
+ set_fact:
+ ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout }}, recursive=True) }}"
+ loop: "{{ parent_device_data_part.results }}"
+ when: item.skipped is undefined
+
+    - name: close dmcrypt devices if needed
+ command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
+ with_nested:
+ - "{{ _osd_hosts }}"
+ - [ 'block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt' ]
+ delegate_to: "{{ item.0 }}"
+ failed_when: false
+ register: result
+ until: result is succeeded
+ when:
+ - item.2 not in _lvm_list.keys()
+ - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
+ - ceph_osd_data_json[item.2][item.3] is defined
+
+ - name: use ceph-volume lvm zap to destroy all partitions
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: zap
+ destroy: true
+ data: "{{ ceph_osd_data_json[item.2]['pkname_data'] if item.3 == 'data' else ceph_osd_data_json[item.2][item.3]['path'] }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_nested:
+ - "{{ _osd_hosts }}"
+ - [ 'block', 'block.db', 'block.wal', 'journal', 'data' ]
+ delegate_to: "{{ item.0 }}"
+ failed_when: false
+ register: result
+ when:
+ - item.2 not in _lvm_list.keys()
+ - ceph_osd_data_json[item.2][item.3] is defined
+
+ - name: zap osd devices
+ ceph_volume:
+ action: "zap"
+ osd_fsid: "{{ item.1 }}"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ item.0 }}"
+ loop: "{{ _osd_hosts }}"
+ when: item.2 in _lvm_list.keys()
+
+ - name: ensure osds are marked down
+ ceph_osd:
+ ids: "{{ osd_to_kill.split(',') }}"
+ cluster: "{{ cluster }}"
+ state: down
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: purge osd(s) from the cluster
+ ceph_osd:
+ ids: "{{ item }}"
+ cluster: "{{ cluster }}"
+ state: purge
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ run_once: true
+ with_items: "{{ osd_to_kill.split(',') }}"
+
+ - name: remove osd data dir
+ file:
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
+ state: absent
+ loop: "{{ _osd_hosts }}"
+ delegate_to: "{{ item.0 }}"
+
+ - name: show ceph health
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
+ changed_when: false
+
+ - name: show ceph osd tree
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
+ changed_when: false
--- /dev/null
+---
+# This playbook removes the Ceph RBD mirror from your cluster on the given
+# node.
+#
+# Use it like this:
+# ansible-playbook shrink-rbdmirror.yml -e rbdmirror_to_kill=ceph-rbdmirror01
+# Prompts for confirmation to shrink, defaults to no and
+# doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-rbdmirror.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+- name: gather facts and check the init system
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ mon_group_name|default('rbdmirrors') }}"
+ become: true
+ tasks:
+ - debug:
+ msg: gather facts on MONs and RBD mirrors
+
+- name: confirm whether user really meant to remove rbd mirror from the ceph
+ cluster
+ hosts: "{{ groups[mon_group_name][0] }}"
+ become: true
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to shrink the cluster?
+ default: 'no'
+ private: no
+ pre_tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: exit playbook, if no rbdmirror was given
+ fail:
+ msg: "rbdmirror_to_kill must be declared
+ Exiting shrink-cluster playbook, no RBD mirror was removed.
+ On the command line when invoking the playbook, you can use
+ -e rbdmirror_to_kill=rbd-mirror01 argument. You can only remove a
+ single rbd mirror each time the playbook runs."
+ when: rbdmirror_to_kill is not defined
+
+ - name: exit playbook, if the rbdmirror is not part of the inventory
+ fail:
+ msg: >
+ It seems that the host given is not part of your inventory,
+ please make sure it is.
+ when: rbdmirror_to_kill not in groups[rbdmirror_group_name]
+
+ - name: exit playbook, if user did not mean to shrink cluster
+ fail:
+ msg: "Exiting shrink-rbdmirror playbook, no rbd-mirror was removed.
+ To shrink the cluster, either say 'yes' on the prompt or
+           use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: set_fact container_exec_cmd for mon0
+ when: containerized_deployment | bool
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+
+ - name: exit playbook, if can not connect to the cluster
+ command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
+ register: ceph_health
+ changed_when: false
+ until: ceph_health is succeeded
+ retries: 5
+ delay: 2
+
+ - name: set_fact rbdmirror_to_kill_hostname
+ set_fact:
+ rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
+
+ - name: set_fact rbdmirror_gids
+ set_fact:
+ rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [ item ] }}"
+ with_items: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}"
+ when: item != 'summary'
+
+ - name: set_fact rbdmirror_to_kill_gid
+ set_fact:
+ rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}"
+ with_items: "{{ rbdmirror_gids }}"
+ when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
+
+ tasks:
+ - name: stop rbdmirror service
+ service:
+ name: ceph-rbd-mirror@rbd-mirror.{{ rbdmirror_to_kill_hostname }}
+ state: stopped
+ enabled: no
+ delegate_to: "{{ rbdmirror_to_kill }}"
+ failed_when: false
+
+ - name: purge related directories
+ file:
+ path: /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}-{{ rbdmirror_to_kill_hostname }}
+ state: absent
+ delegate_to: "{{ rbdmirror_to_kill }}"
+
+ post_tasks:
+ - name: get servicemap details
+ command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
+ register: ceph_health
+ failed_when:
+ - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
+ - rbdmirror_to_kill_gid in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
+ until:
+ - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
+ - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
+ when: rbdmirror_to_kill_gid is defined
+ retries: 12
+ delay: 10
+
+ - name: show ceph health
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ changed_when: false
--- /dev/null
+---
+# This playbook shrinks the Ceph RGW from your cluster
+#
+# Use it like this:
+# ansible-playbook shrink-rgw.yml -e rgw_to_kill=ceph-rgw01
+# Prompts for confirmation to shrink, defaults to no and
+# doesn't shrink the cluster. yes shrinks the cluster.
+#
+# ansible-playbook -e ireallymeanit=yes|no shrink-rgw.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+
+- name: confirm whether user really meant to remove rgw from the ceph cluster
+ hosts: localhost
+ become: false
+ gather_facts: false
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to shrink the cluster?
+ default: 'no'
+ private: no
+ tasks:
+ - name: exit playbook, if no rgw was given
+ when: rgw_to_kill is not defined or rgw_to_kill | length == 0
+ fail:
+ msg: >
+ rgw_to_kill must be declared.
+ Exiting shrink-cluster playbook, no RGW was removed. On the command
+ line when invoking the playbook, you can use
+ "-e rgw_to_kill=ceph.rgw0 argument". You can only remove a single
+ RGW each time the playbook runs.
+
+ - name: exit playbook, if user did not mean to shrink cluster
+ when: ireallymeanit != 'yes'
+ fail:
+ msg: >
+          Exiting shrink-rgw playbook, no RGW was removed. To shrink the
+ cluster, either say 'yes' on the prompt or use
+ '-e ireallymeanit=yes' on the command line when invoking the playbook
+
+- name: gather facts on mons and rgws
+ hosts:
+ - "{{ mon_group_name | default('mons') }}[0]"
+ - "{{ rgw_group_name | default('rgws') }}"
+ become: true
+ gather_facts: false
+ tasks:
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+
+- hosts: mons[0]
+ become: true
+ gather_facts: false
+ pre_tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary
+
+ - name: set_fact container_exec_cmd for mon0
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: exit playbook, if can not connect to the cluster
+ command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ register: ceph_health
+ changed_when: false
+ until: ceph_health is succeeded
+ retries: 5
+ delay: 2
+
+ - name: get rgw instances
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
+ register: rgw_instances
+ changed_when: false
+
+
+ - name: exit playbook, if the rgw_to_kill doesn't exist
+ when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
+ fail:
+ msg: >
+ It seems that the rgw instance given is not part of the ceph cluster. Please
+ make sure it is.
+        The rgw instance format is $(hostname).rgw$(instance number).
+ tasks:
+ - name: get rgw host running the rgw instance to kill
+ set_fact:
+ rgw_host: '{{ item }}'
+ with_items: '{{ groups[rgw_group_name] }}'
+ when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
+
+ - name: stop rgw service
+ service:
+ name: ceph-radosgw@rgw.{{ rgw_to_kill }}
+ state: stopped
+ enabled: no
+ delegate_to: "{{ rgw_host }}"
+ failed_when: false
+
+ - name: ensure that the rgw is stopped
+ command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa 303
+ register: rgw_to_kill_status
+ failed_when: rgw_to_kill_status.rc == 0
+ changed_when: false
+ delegate_to: "{{ rgw_host }}"
+ retries: 5
+ delay: 2
+
+ - name: exit if rgw_to_kill is reported in ceph status
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
+ register: ceph_status
+ changed_when: false
+ failed_when:
+ - (ceph_status.stdout | from_json).services.rgw is defined
+ - rgw_to_kill in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
+ until:
+ - (ceph_status.stdout | from_json).services.rgw is defined
+ - rgw_to_kill not in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
+ retries: 3
+ delay: 3
+
+ - name: purge directories related to rgw
+ file:
+ path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_to_kill }}
+ state: absent
+ delegate_to: "{{ rgw_host }}"
+ post_tasks:
+ - name: show ceph health
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ changed_when: false
--- /dev/null
+---
+# This playbook queries each OSD using `ceph-volume inventory` to report the
+# entire storage device inventory of a cluster.
+#
+# Usage:
+# ansible-playbook storage-inventory.yml
+
+- name: gather facts and check the init system
+
+ hosts: "{{ osd_group_name|default('osds') }}"
+
+ become: true
+
+ tasks:
+ - debug: msg="gather facts on all Ceph hosts for following reference"
+
+- name: query each host for storage device inventory
+
+ hosts: "{{ osd_group_name|default('osds') }}"
+
+ become: true
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: list storage inventory
+ ceph_volume:
+ action: "inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
--- /dev/null
+---
+# This playbook switches from non-containerized to containerized Ceph daemons
+
+- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons
+
+ hosts: localhost
+
+ gather_facts: false
+ any_errors_fatal: true
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons?
+ default: 'no'
+ private: no
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: fail when less than three monitors
+ fail:
+ msg: "This playbook requires at least three monitors."
+ when: groups[mon_group_name] | length | int < 3
+
+    - name: exit playbook, if user did not mean to switch from non-containerized to containerized daemons
+ fail:
+ msg: >
+ "Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook,
+ cluster did not switch from non-containerized to containerized ceph daemons.
+ To switch from non-containerized to containerized ceph daemons, either say 'yes' on the prompt or
+          use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+
+- name: gather facts
+
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
+
+ become: true
+
+ vars:
+ delegate_facts_host: True
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}"
+ run_once: true
+ when: delegate_facts_host | bool
+ tags: always
+
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-validate
+
+- name: switching from non-containerized to containerized ceph mon
+ vars:
+ containerized_deployment: true
+ switch_to_containers: True
+ mon_group_name: mons
+ hosts: "{{ mon_group_name|default('mons') }}"
+ serial: 1
+ become: true
+ pre_tasks:
+ - name: select a running monitor
+ set_fact: mon_host={{ item }}
+ with_items: "{{ groups[mon_group_name] }}"
+ when: item != inventory_hostname
+
+ - name: stop non-containerized ceph mon
+ service:
+ name: "ceph-mon@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+
+ - name: remove old systemd unit files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/systemd/system/ceph-mon@.service
+ - /usr/lib/systemd/system/ceph-mon.target
+ - /lib/systemd/system/ceph-mon@.service
+ - /lib/systemd/system/ceph-mon.target
+
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ - name: check for existing old leveldb file extension (ldb)
+ shell: stat /var/lib/ceph/mon/*/store.db/*.ldb
+ changed_when: false
+ failed_when: false
+ register: ldb_files
+
+ - name: rename leveldb extension from ldb to sst
+ shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb
+ changed_when: false
+ failed_when: false
+ when: ldb_files.rc == 0
+
+ - name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
+ command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
+ args:
+ creates: /etc/ceph/{{ cluster }}.mon.keyring
+ changed_when: false
+ failed_when: false
+
+ tasks:
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-mon
+
+ post_tasks:
+ - name: waiting for the monitor to join the quorum...
+ command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
+ register: ceph_health_raw
+ until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
+ changed_when: false
+ retries: "{{ health_mon_check_retries }}"
+ delay: "{{ health_mon_check_delay }}"
+
+- name: switching from non-containerized to containerized ceph mgr
+
+ hosts: "{{ mgr_group_name|default('mgrs') }}"
+
+ vars:
+ containerized_deployment: true
+ mgr_group_name: mgrs
+
+ serial: 1
+ become: true
+ pre_tasks:
+ # failed_when: false is here because if we're
+ # working with a jewel cluster then ceph mgr
+ # will not exist
+ - name: stop non-containerized ceph mgr(s)
+ service:
+ name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+
+ - name: remove old systemd unit files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/systemd/system/ceph-mgr@.service
+ - /usr/lib/systemd/system/ceph-mgr.target
+ - /lib/systemd/system/ceph-mgr@.service
+ - /lib/systemd/system/ceph-mgr.target
+
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ tasks:
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-mgr
+
+
+- name: set osd flags
+ hosts: "{{ mon_group_name | default('mons') }}[0]"
+ become: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: get pool list
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ register: pool_list
+ changed_when: false
+ check_mode: false
+
+ - name: get balancer module status
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ register: balancer_status_switch
+ changed_when: false
+ check_mode: false
+
+ - name: set_fact pools_pgautoscaler_mode
+ set_fact:
+ pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
+ with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
+
+ - name: disable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ changed_when: false
+ when: (balancer_status_switch.stdout | from_json)['active'] | bool
+
+ - name: disable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: false
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: set osd flags
+ ceph_osd_flag:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items:
+ - noout
+ - nodeep-scrub
+
+
+- name: switching from non-containerized to containerized ceph osd
+
+ vars:
+ containerized_deployment: true
+ osd_group_name: osds
+ switch_to_containers: True
+
+ hosts: "{{ osd_group_name|default('osds') }}"
+
+ serial: 1
+ become: true
+ pre_tasks:
+
+ - import_role:
+ name: ceph-defaults
+
+ - name: collect running osds
+ shell: |
+ set -o pipefail;
+ systemctl list-units | grep -E "loaded * active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-volume'
+ register: running_osds
+ changed_when: false
+ failed_when: false
+
+ # systemd module does not support --runtime option
+ - name: disable ceph-osd@.service runtime-enabled
+ command: "systemctl disable --runtime {{ item }}" # noqa 303
+ changed_when: false
+ failed_when: false
+ with_items: "{{ running_osds.stdout_lines | default([]) }}"
+ when: item.startswith('ceph-osd@')
+
+ - name: stop/disable/mask non-containerized ceph osd(s) (if any)
+ systemd:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ with_items: "{{ running_osds.stdout_lines | default([]) }}"
+ when: running_osds != []
+
+ - name: disable ceph.target
+ systemd:
+ name: ceph.target
+ enabled: no
+
+ - name: remove old ceph-osd systemd units
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/systemd/system/ceph-osd.target
+ - /usr/lib/systemd/system/ceph-osd@.service
+ - /usr/lib/systemd/system/ceph-volume@.service
+ - /lib/systemd/system/ceph-osd.target
+ - /lib/systemd/system/ceph-osd@.service
+ - /lib/systemd/system/ceph-volume@.service
+
+ - import_role:
+ name: ceph-facts
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ - name: check for existing old leveldb file extension (ldb)
+ shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb
+ changed_when: false
+ failed_when: false
+ register: ldb_files
+
+ - name: rename leveldb extension from ldb to sst
+ shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
+ changed_when: false
+ failed_when: false
+ when: ldb_files.rc == 0
+
+ - name: check if containerized osds are already running
+ command: >
+ {{ container_binary }} ps -q --filter='name=ceph-osd'
+ changed_when: false
+ failed_when: false
+ register: osd_running
+
+ - name: get osd directories
+ command: >
+ find /var/lib/ceph/osd {% if dmcrypt | bool %}/var/lib/ceph/osd-lockbox{% endif %} -maxdepth 1 -mindepth 1 -type d
+ register: osd_dirs
+ changed_when: false
+ failed_when: false
+
+ - name: unmount all the osd directories
+ command: >
+ umount {{ item }}
+ changed_when: false
+ failed_when: false
+ with_items: "{{ osd_dirs.stdout_lines }}"
+ when: osd_running.rc != 0 or osd_running.stdout_lines | length == 0
+
+ tasks:
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-osd
+
+ post_tasks:
+ - name: container - waiting for clean pgs...
+ command: >
+ {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
+ register: ceph_health_post
+ until: >
+ (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
+ and
+ (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | selectattr('name', 'search', '^active\\+clean') | map(attribute='num') | list | sum) == (ceph_health_post.stdout | from_json).pg_summary.num_pgs)
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ retries: "{{ health_osd_check_retries }}"
+ delay: "{{ health_osd_check_delay }}"
+ changed_when: false
+
+
+- name: unset osd flags
+ hosts: "{{ mon_group_name | default('mons') }}[0]"
+ become: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - name: re-enable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: true
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: unset osd flags
+ ceph_osd_flag:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ state: absent
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items:
+ - noout
+ - nodeep-scrub
+
+ - name: re-enable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ changed_when: false
+ when: (balancer_status_switch.stdout | from_json)['active'] | bool
+
+
+- name: switching from non-containerized to containerized ceph mds
+
+ hosts: "{{ mds_group_name|default('mdss') }}"
+
+ vars:
+ containerized_deployment: true
+ mds_group_name: mdss
+
+ serial: 1
+ become: true
+ pre_tasks:
+
+ - name: stop non-containerized ceph mds(s)
+ service:
+ name: "ceph-mds@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+
+ - name: remove old systemd unit files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/systemd/system/ceph-mds@.service
+ - /usr/lib/systemd/system/ceph-mds.target
+ - /lib/systemd/system/ceph-mds@.service
+ - /lib/systemd/system/ceph-mds.target
+
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ tasks:
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-mds
+
+
+- name: switching from non-containerized to containerized ceph rgw
+
+ hosts: "{{ rgw_group_name|default('rgws') }}"
+
+ vars:
+ containerized_deployment: true
+ rgw_group_name: rgws
+
+ serial: 1
+ become: true
+ pre_tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ - import_role:
+ name: ceph-config
+ tasks_from: rgw_systemd_environment_file.yml
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ tasks:
+ - name: stop non-containerized ceph rgw(s)
+ service:
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+ state: stopped
+ enabled: no
+ with_items: "{{ rgw_instances }}"
+
+ - name: remove old systemd unit files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/systemd/system/ceph-radosgw@.service
+ - /usr/lib/systemd/system/ceph-radosgw.target
+ - /lib/systemd/system/ceph-radosgw@.service
+ - /lib/systemd/system/ceph-radosgw.target
+
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-rgw
+
+
+- name: switching from non-containerized to containerized ceph rbd-mirror
+
+ hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+
+ vars:
+ containerized_deployment: true
+ rbdmirror_group_name: rbdmirrors
+
+ serial: 1
+ become: true
+ pre_tasks:
+ - name: check for ceph rbd mirror services
+ command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa 303
+ changed_when: false
+ register: rbdmirror_services
+
+ - name: stop non-containerized ceph rbd mirror(s)
+ service:
+ name: "{{ item.split('=')[1] }}"
+ state: stopped
+ enabled: no
+ loop: "{{ rbdmirror_services.stdout_lines }}"
+
+ - name: remove old systemd unit files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/systemd/system/ceph-rbd-mirror@.service
+ - /usr/lib/systemd/system/ceph-rbd-mirror.target
+ - /lib/systemd/system/ceph-rbd-mirror@.service
+ - /lib/systemd/system/ceph-rbd-mirror.target
+
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ tasks:
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-rbd-mirror
+
+
+- name: switching from non-containerized to containerized ceph nfs
+
+ hosts: "{{ nfs_group_name|default('nfss') }}"
+
+ vars:
+ containerized_deployment: true
+ nfs_group_name: nfss
+
+ serial: 1
+ become: true
+ pre_tasks:
+
+ # failed_when: false is here because if we're
+ # working with a jewel cluster then ceph nfs
+ # will not exist
+ - name: stop non-containerized ceph nfs(s)
+ service:
+ name: nfs-ganesha
+ state: stopped
+ enabled: no
+ failed_when: false
+
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ tasks:
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-nfs
+
+- name: switching from non-containerized to containerized iscsigws
+ hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ vars:
+ containerized_deployment: true
+ iscsi_gw_group_name: iscsigws
+ become: true
+ serial: 1
+ pre_tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: stop iscsigw services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ with_items:
+ - tcmu-runner
+ - rbd-target-gw
+ - rbd-target-api
+
+ - name: remove old systemd unit files
+ file:
+ path: "/usr/lib/systemd/system/{{ item }}.service"
+ state: absent
+ with_items:
+ - tcmu-runner
+ - rbd-target-gw
+ - rbd-target-api
+ tasks:
+ - import_role:
+ name: ceph-facts
+
+ - import_role:
+ name: ceph-handler
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ - import_role:
+ name: ceph-container-engine
+
+ - import_role:
+ name: ceph-container-common
+
+ - import_role:
+ name: ceph-iscsi-gw
+
+- name: switching from non-containerized to containerized ceph-crash
+
+ hosts:
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
+
+ vars:
+ containerized_deployment: true
+ become: true
+ tasks:
+ - name: stop non-containerized ceph-crash
+ service:
+ name: ceph-crash
+ state: stopped
+ enabled: no
+
+ - import_role:
+ name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+ - import_role:
+ name: ceph-handler
+
+ - import_role:
+ name: ceph-crash
+
+- name: final task
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ osd_group_name|default('osds') }}"
+ - "{{ mds_group_name|default('mdss') }}"
+ - "{{ rgw_group_name|default('rgws') }}"
+ vars:
+ containerized_deployment: true
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+ # as in this case we know we want all owned by ceph user
+ - name: set proper ownership on ceph directories
+ command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
--- /dev/null
+---
+# NOTE (leseb):
+# This playbook aims to take over a cluster that was not configured with
+# ceph-ansible.
+#
+# The procedure is as follows:
+#
+# 1. Install Ansible and add your monitor and OSD hosts to its inventory. For more detailed information you can read the [Ceph Ansible Wiki](https://github.com/ceph/ceph-ansible/wiki)
+# 2. Set `generate_fsid: false` in `group_vars` (see the example below)
+# 3. Get your current cluster fsid with `ceph fsid` and set `fsid` accordingly in `group_vars`
+# 4. Run the playbook called `take-over-existing-cluster.yml`, like this: `ansible-playbook take-over-existing-cluster.yml`.
+# 5. Finally, run ceph-ansible to validate everything by doing: `ansible-playbook site.yml`.
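+#
+# For example, the relevant `group_vars` entries would look like this
+# (the file name group_vars/all.yml is an assumption, the fsid is a placeholder):
+#
+# generate_fsid: false
+# fsid: <output of `ceph fsid` on the existing cluster>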
+
+- hosts: mons
+ become: True
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-fetch-keys
+
+- hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - mgrs
+ - iscsi-gw
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ post_tasks:
+ - name: get the name of the existing ceph cluster
+ shell: |
+ set -o pipefail;
+ basename $(grep --exclude '*.bak' -R fsid /etc/ceph/ | egrep -o '^[^.]*' | head -n 1)
+ changed_when: false
+ register: cluster_name
+
+ - name: "stat {{ cluster_name.stdout }}.conf"
+ stat:
+ path: "/etc/ceph/{{ cluster_name.stdout }}.conf"
+ register: ceph_conf_stat
+
+ # Creates a backup of original ceph conf file in 'cluster_name-YYYYMMDDTHHMMSS.conf.bak' format
+ - name: "make a backup of original {{ cluster_name.stdout }}.conf"
+ copy:
+ src: "/etc/ceph/{{ cluster_name.stdout }}.conf"
+ dest: "/etc/ceph/{{ cluster_name.stdout }}-{{ ansible_date_time.iso8601_basic_short }}.conf.bak"
+ remote_src: true
+ owner: "{{ ceph_conf_stat.stat.pw_name }}"
+ group: "{{ ceph_conf_stat.stat.gr_name }}"
+ mode: "{{ ceph_conf_stat.stat.mode }}"
+
+ - name: generate ceph configuration file
+ action: config_template
+ args:
+ src: "roles/ceph-config/templates/ceph.conf.j2"
+ dest: "/etc/ceph/{{ cluster_name.stdout }}.conf"
+ owner: "{{ ceph_conf_stat.stat.pw_name }}"
+ group: "{{ ceph_conf_stat.stat.gr_name }}"
+ mode: "{{ ceph_conf_stat.stat.mode }}"
+ config_overrides: "{{ ceph_conf_overrides }}"
+ config_type: ini
--- /dev/null
+---
+# This playbook was made to automate Ceph server maintenance.
+# Typical use case: hardware change.
+# By running this playbook you will set the 'noout' flag on your
+# cluster, which means that OSDs **can't** be marked as out
+# of the CRUSH map, but they will be marked as down.
+# Basically we tell the cluster not to move any data since
+# the operation won't last long.
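+#
+# Replace <your_host> and <your_monitor> below with the inventory names of the
+# host under maintenance and of one of your monitors, then run the playbook,
+# for example (the playbook file name is a placeholder):
+# ansible-playbook <this_playbook>.yml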
+
+- hosts: <your_host>
+ gather_facts: False
+
+ tasks:
+
+ - name: Set the noout flag
+ command: ceph osd set noout
+ delegate_to: <your_monitor>
+
+ - name: Turn off the server
+ command: poweroff
+
+ - name: Wait for the server to go down
+ local_action:
+ module: wait_for
+ host: <your_host>
+ port: 22
+ state: stopped
+
+ - name: Wait for the server to come up
+ local_action:
+ module: wait_for
+ host: <your_host>
+ port: 22
+ delay: 10
+ timeout: 3600
+
+ - name: Unset the noout flag
+ command: ceph osd unset noout
+ delegate_to: <your_monitor>
--- /dev/null
+---
+# This playbook was meant to upgrade a node from Ubuntu to RHEL.
+# We are performing a set of actions prior to rebooting the node.
+# The node reboots via PXE and gets its new operating system.
+# This playbook only works for monitors and OSDs.
+# Note that some of the checks are ugly:
+# e.g. the `when: migration_completed.stat.exists` conditions
+# could be improved with includes, however I wanted to keep a single file...
+#
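+# The plays below delegate a few tasks (backup directory creation, quorum and
+# PG checks) to the first host of a 'backup' inventory group via
+# `groups.backup[0]`, so an inventory similar to the following is assumed
+# (hostnames are illustrative):
+#
+# [mons]
+# mon1
+#
+# [osds]
+# osd1
+#
+# [rgws]
+# rgw1
+#
+# [backup]
+# backup1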
+
+- hosts: mons
+ serial: 1
+ sudo: True
+
+ vars:
+ backup_dir: /tmp/
+
+ tasks:
+
+    - name: Check if the node has been migrated already
+ stat: >
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
+ register: migration_completed
+ failed_when: false
+
+ - name: Check for failed run
+ stat: >
+ path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+ register: mon_archive_leftover
+
+ - fail: msg="Looks like an archive is already there, please remove it!"
+ when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True
+
+ - name: Compress the store as much as possible
+ command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
+ when: migration_completed.stat.exists == False
+
+ - name: Check if sysvinit
+ stat: >
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
+ register: monsysvinit
+ changed_when: False
+
+ - name: Check if upstart
+ stat: >
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
+ register: monupstart
+ changed_when: False
+
+ - name: Check if init does what it is supposed to do (Sysvinit)
+ shell: >
+ ps faux|grep -sq [c]eph-mon && service ceph status mon >> /dev/null
+ register: ceph_status_sysvinit
+ changed_when: False
+
+    # can't complete the condition since the previous task never ran...
+ - fail: msg="Something is terribly wrong here, sysvinit is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
+ when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True
+
+ - name: Check if init does what it is supposed to do (upstart)
+ shell: >
+ ps faux|grep -sq [c]eph-mon && status ceph-mon-all >> /dev/null
+ register: ceph_status_upstart
+ changed_when: False
+
+ - fail: msg="Something is terribly wrong here, upstart is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
+ when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
+
+ - name: Restart the Monitor after compaction (Upstart)
+ service: >
+ name=ceph-mon
+ state=restarted
+ args=id={{ ansible_facts['hostname'] }}
+ when: monupstart.stat.exists == True and migration_completed.stat.exists == False
+
+ - name: Restart the Monitor after compaction (Sysvinit)
+ service: >
+ name=ceph
+ state=restarted
+ args=mon
+ when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
+
+ - name: Wait for the monitor to be up again
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ port: 6789
+ timeout: 10
+ when: migration_completed.stat.exists == False
+
+ - name: Stop the monitor (Upstart)
+ service: >
+ name=ceph-mon
+ state=stopped
+ args=id={{ ansible_facts['hostname'] }}
+ when: monupstart.stat.exists == True and migration_completed.stat.exists == False
+
+ - name: Stop the monitor (Sysvinit)
+ service: >
+ name=ceph
+ state=stopped
+ args=mon
+ when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
+
+ - name: Wait for the monitor to be down
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ port: 6789
+ timeout: 10
+ state: stopped
+ when: migration_completed.stat.exists == False
+
+ - name: Create a backup directory
+ file: >
+ path={{ backup_dir }}/monitors-backups
+ state=directory
+ owner=root
+ group=root
+ mode=0644
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.backup[0] }}"
+ when: migration_completed.stat.exists == False
+
+ # NOTE (leseb): should we convert upstart to sysvinit here already?
+ - name: Archive monitor stores
+ shell: >
+ tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
+ chdir=/var/lib/ceph/
+ creates={{ ansible_facts['hostname'] }}.tar
+ when: migration_completed.stat.exists == False
+
+ - name: Scp the Monitor store
+ fetch: >
+ src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+ dest={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
+ flat=yes
+ when: migration_completed.stat.exists == False
+
+ - name: Reboot the server
+ command: reboot
+ when: migration_completed.stat.exists == False
+
+ - name: Wait for the server to come up
+ local_action:
+ module: wait_for
+ port: 22
+ delay: 10
+ timeout: 3600
+ when: migration_completed.stat.exists == False
+
+ - name: Wait a bit more to be sure that the server is ready
+ pause: seconds=20
+ when: migration_completed.stat.exists == False
+
+ - name: Check if sysvinit
+ stat: >
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
+ register: monsysvinit
+ changed_when: False
+
+ - name: Check if upstart
+ stat: >
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
+ register: monupstart
+ changed_when: False
+
+ - name: Make sure the monitor is stopped (Upstart)
+ service: >
+ name=ceph-mon
+ state=stopped
+ args=id={{ ansible_facts['hostname'] }}
+ when: monupstart.stat.exists == True and migration_completed.stat.exists == False
+
+ - name: Make sure the monitor is stopped (Sysvinit)
+ service: >
+ name=ceph
+ state=stopped
+ args=mon
+ when: monsysvinit.stat.exists == True and migration_completed.stat.exists == False
+
+ # NOTE (leseb): 'creates' was added in Ansible 1.6
+ - name: Copy and unarchive the monitor store
+ unarchive: >
+ src={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
+ dest=/var/lib/ceph/
+ copy=yes
+ mode=0600
+ creates=etc/ceph/ceph.conf
+ when: migration_completed.stat.exists == False
+
+ - name: Copy keys and configs
+ shell: >
+ cp etc/ceph/* /etc/ceph/
+ chdir=/var/lib/ceph/
+ when: migration_completed.stat.exists == False
+
+ - name: Configure RHEL7 for sysvinit
+ shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
+ when: migration_completed.stat.exists == False
+
+ # NOTE (leseb): at this point the upstart and sysvinit checks are not necessary
+ # so we directly call sysvinit
+ - name: Start the monitor
+ service: >
+ name=ceph
+ state=started
+ args=mon
+ when: migration_completed.stat.exists == False
+
+ - name: Wait for the Monitor to be up again
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ port: 6789
+ timeout: 10
+ when: migration_completed.stat.exists == False
+
+ - name: Waiting for the monitor to join the quorum...
+ shell: >
+ ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 10
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.backup[0] }}"
+ when: migration_completed.stat.exists == False
+
+ - name: Done moving to the next monitor
+ file: >
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
+ state=touch
+ owner=root
+ group=root
+ mode=0600
+ when: migration_completed.stat.exists == False
+
+- hosts: osds
+ serial: 1
+ sudo: True
+
+ vars:
+ backup_dir: /tmp/
+
+ tasks:
+    - name: Check if the node has been migrated already
+ stat: >
+ path=/var/lib/ceph/migration_completed
+ register: migration_completed
+ failed_when: false
+
+ - name: Check for failed run
+ stat: >
+ path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+ register: osd_archive_leftover
+
+ - fail: msg="Looks like an archive is already there, please remove it!"
+ when: migration_completed.stat.exists == False and osd_archive_leftover.stat.exists == True
+
+ - name: Check if init does what it is supposed to do (Sysvinit)
+ shell: >
+ ps faux|grep -sq [c]eph-osd && service ceph status osd >> /dev/null
+ register: ceph_status_sysvinit
+ changed_when: False
+
+    # can't complete the condition since the previous task never ran...
+ - fail: msg="Something is terribly wrong here, sysvinit is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
+ when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True
+
+ - name: Check if init does what it is supposed to do (upstart)
+ shell: >
+ ps faux|grep -sq [c]eph-osd && initctl list|egrep -sq "ceph-osd \(ceph/.\) start/running, process [0-9][0-9][0-9][0-9]"
+ register: ceph_status_upstart
+ changed_when: False
+
+ - fail: msg="Something is terribly wrong here, upstart is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
+ when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
+
+ - name: Set the noout flag
+ command: ceph osd set noout
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups[mon_group_name][0] }}"
+ when: migration_completed.stat.exists == False
+
+ - name: Check if sysvinit
+ shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
+ register: osdsysvinit
+ failed_when: false
+ changed_when: False
+
+ - name: Check if upstart
+ shell: stat /var/lib/ceph/osd/ceph-*/upstart
+ register: osdupstart
+ failed_when: false
+ changed_when: False
+
+ - name: Archive ceph configs
+ shell: >
+ tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
+ chdir=/var/lib/ceph/
+ creates={{ ansible_facts['hostname'] }}.tar
+ when: migration_completed.stat.exists == False
+
+ - name: Create backup directory
+ file: >
+ path={{ backup_dir }}/osds-backups
+ state=directory
+ owner=root
+ group=root
+ mode=0644
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.backup[0] }}"
+ when: migration_completed.stat.exists == False
+
+ - name: Scp OSDs dirs and configs
+ fetch: >
+ src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+ dest={{ backup_dir }}/osds-backups/
+ flat=yes
+ when: migration_completed.stat.exists == False
+
+ - name: Collect OSD ports
+ shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq
+ register: osd_ports
+ when: migration_completed.stat.exists == False
+
+ - name: Gracefully stop the OSDs (Upstart)
+ service: >
+ name=ceph-osd-all
+ state=stopped
+ when: osdupstart.rc == 0 and migration_completed.stat.exists == False
+
+ - name: Gracefully stop the OSDs (Sysvinit)
+ service: >
+ name=ceph
+ state=stopped
+        args=osd
+ when: osdsysvinit.rc == 0 and migration_completed.stat.exists == False
+
+ - name: Wait for the OSDs to be down
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+        port: "{{ item }}"
+ timeout: 10
+ state: stopped
+ with_items: "{{ osd_ports.stdout_lines }}"
+ when: migration_completed.stat.exists == False
+
+ - name: Configure RHEL with sysvinit
+ shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
+ when: migration_completed.stat.exists == False
+
+ - name: Reboot the server
+ command: reboot
+ when: migration_completed.stat.exists == False
+
+ - name: Wait for the server to come up
+ local_action:
+ module: wait_for
+ port: 22
+ delay: 10
+ timeout: 3600
+ when: migration_completed.stat.exists == False
+
+ - name: Wait a bit to be sure that the server is ready for scp
+ pause: seconds=20
+ when: migration_completed.stat.exists == False
+
+ # NOTE (leseb): 'creates' was added in Ansible 1.6
+ - name: Copy and unarchive the OSD configs
+ unarchive: >
+ src={{ backup_dir }}/osds-backups/{{ ansible_facts['hostname'] }}.tar
+ dest=/var/lib/ceph/
+ copy=yes
+ mode=0600
+ creates=etc/ceph/ceph.conf
+ when: migration_completed.stat.exists == False
+
+ - name: Copy keys and configs
+ shell: >
+ cp etc/ceph/* /etc/ceph/
+ chdir=/var/lib/ceph/
+ when: migration_completed.stat.exists == False
+
+ # NOTE (leseb): at this point the upstart and sysvinit checks are not necessary
+ # so we directly call sysvinit
+ - name: Start all the OSDs
+ service: >
+ name=ceph-osd-all
+ state=started
+ args=osd
+ when: migration_completed.stat.exists == False
+
+ # NOTE (leseb): this is tricky unless this is set into the ceph.conf
+ # listened ports can be predicted, thus they will change after each restart
+# - name: Wait for the OSDs to be up again
+# local_action: >
+# wait_for
+# host={{ ansible_ssh_host | default(inventory_hostname) }}
+# port={{ item }}
+# timeout=30
+# with_items:
+# - "{{ osd_ports.stdout_lines }}"
+
+ - name: Waiting for clean PGs...
+ shell: >
+ test "[""$(ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph -s -f json | python -c 'import sys, json; print([ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"])')"
+ register: result
+ until: result.rc == 0
+ retries: 10
+ delay: 10
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.backup[0] }}"
+ when: migration_completed.stat.exists == False
+
+ - name: Done moving to the next OSD
+ file: >
+ path=/var/lib/ceph/migration_completed
+ state=touch
+ owner=root
+ group=root
+ mode=0600
+ when: migration_completed.stat.exists == False
+
+ - name: Unset the noout flag
+ command: ceph osd unset noout
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups[mon_group_name][0] }}"
+ when: migration_completed.stat.exists == False
+
+- hosts: rgws
+ serial: 1
+ sudo: True
+
+ vars:
+ backup_dir: /tmp/
+
+ tasks:
+    - name: Check if the node has been migrated already
+ stat: >
+ path=/var/lib/ceph/radosgw/migration_completed
+ register: migration_completed
+ failed_when: false
+
+ - name: Check for failed run
+ stat: >
+ path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+ register: rgw_archive_leftover
+
+ - fail: msg="Looks like an archive is already there, please remove it!"
+ when: migration_completed.stat.exists == False and rgw_archive_leftover.stat.exists == True
+
+ - name: Archive rados gateway configs
+ shell: >
+ tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
+ chdir=/var/lib/ceph/
+ creates={{ ansible_facts['hostname'] }}.tar
+ when: migration_completed.stat.exists == False
+
+ - name: Create backup directory
+ file: >
+ path={{ backup_dir }}/rgws-backups
+ state=directory
+ owner=root
+ group=root
+ mode=0644
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.backup[0] }}"
+ when: migration_completed.stat.exists == False
+
+ - name: Scp RGWs dirs and configs
+ fetch: >
+ src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+ dest={{ backup_dir }}/rgws-backups/
+ flat=yes
+ when: migration_completed.stat.exists == False
+
+ - name: Gracefully stop the rados gateway
+ service: >
+ name={{ item }}
+ state=stopped
+ with_items: radosgw
+ when: migration_completed.stat.exists == False
+
+ - name: Wait for radosgw to be down
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ path: /tmp/radosgw.sock
+ state: absent
+ timeout: 30
+ when: migration_completed.stat.exists == False
+
+ - name: Reboot the server
+ command: reboot
+ when: migration_completed.stat.exists == False
+
+ - name: Wait for the server to come up
+ local_action:
+ module: wait_for
+ port: 22
+ delay: 10
+ timeout: 3600
+ when: migration_completed.stat.exists == False
+
+ - name: Wait a bit to be sure that the server is ready for scp
+ pause: seconds=20
+ when: migration_completed.stat.exists == False
+
+ # NOTE (leseb): 'creates' was added in Ansible 1.6
+    - name: Copy and unarchive the rados gateway configs
+ unarchive: >
+ src={{ backup_dir }}/rgws-backups/{{ ansible_facts['hostname'] }}.tar
+ dest=/var/lib/ceph/
+ copy=yes
+ mode=0600
+ creates=etc/ceph/ceph.conf
+ when: migration_completed.stat.exists == False
+
+ - name: Copy keys and configs
+ shell: >
+ {{ item }}
+ chdir=/var/lib/ceph/
+ with_items: cp etc/ceph/* /etc/ceph/
+ when: migration_completed.stat.exists == False
+
+ - name: Start rados gateway
+ service: >
+ name={{ item }}
+ state=started
+ with_items: radosgw
+ when: migration_completed.stat.exists == False
+
+ - name: Wait for radosgw to be up again
+ local_action:
+ module: wait_for
+ host: "{{ ansible_ssh_host | default(inventory_hostname) }}"
+ path: /tmp/radosgw.sock
+ state: present
+ timeout: 30
+ when: migration_completed.stat.exists == False
+
+ - name: Done moving to the next rados gateway
+ file: >
+ path=/var/lib/ceph/radosgw/migration_completed
+ state=touch
+ owner=root
+ group=root
+ mode=0600
+ when: migration_completed.stat.exists == False
--- /dev/null
+---
+# This playbook will create a custom partition layout on your OSD hosts.
+# You should define the `devices` variable for every host.
+#
+# For example, in host_vars/hostname1
+#
+# devices:
+# - device_name: sdb
+# partitions:
+# - index: 1
+# size: 10G
+# type: data
+# - index: 2
+# size: 5G
+# type: journal
+# - device_name: sdc
+# partitions:
+# - index: 1
+# size: 10G
+# type: data
+# - index: 2
+# size: 5G
+# type: journal
+#
+- vars:
+ osd_group_name: osds
+ journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
+ data_typecode: 4fbd7e29-9d25-41b8-afd0-062c0ceff05d
+ devices: []
+ hosts: "{{ osd_group_name }}"
+
+ tasks:
+
+ - name: load a variable file for devices partition
+ include_vars: "{{ item }}"
+ with_first_found:
+ - files:
+ - "host_vars/{{ ansible_facts['hostname'] }}.yml"
+ - "host_vars/default.yml"
+ skip: true
+
+ - name: exit playbook, if devices not defined
+ fail:
+        msg: "devices must be defined in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
+ when: devices is not defined
+
+ - name: install sgdisk(gdisk)
+ package:
+ name: gdisk
+ state: present
+ register: result
+ until: result is succeeded
+
+ - name: erase all previous partitions(dangerous!!!)
+ shell: sgdisk --zap-all -- /dev/{{item.device_name}}
+ with_items: "{{ devices }}"
+
+ - name: make osd partitions
+ shell: >
+ sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}"
+ "--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}"
+ --mbrtogpt -- /dev/{{item.0.device_name}}
+ with_subelements:
+ - "{{ devices }}"
+ - partitions
+
+ - set_fact:
+ owner: 167
+ group: 167
+ when: ansible_facts['os_family'] == "RedHat"
+
+ - set_fact:
+ owner: 64045
+ group: 64045
+ when: ansible_facts['os_family'] == "Debian"
+
+ - name: change partitions ownership
+ file:
+ path: "/dev/{{item.0.device_name}}{{item.1.index}}"
+ owner: "{{ owner | default('root')}}"
+ group: "{{ group | default('disk')}}"
+ with_subelements:
+ - "{{ devices }}"
+ - partitions
+ when:
+ item.0.device_name | match('/dev/([hsv]d[a-z]{1,2}){1,2}$')
+
+ - name: change partitions ownership
+ file:
+ path: "/dev/{{item.0.device_name}}p{{item.1.index}}"
+ owner: "{{ owner | default('root')}}"
+ group: "{{ group | default('disk')}}"
+ with_subelements:
+ - "{{ devices }}"
+ - partitions
+ when: item.0.device_name | match('/dev/(cciss/c[0-9]d[0-9]|nvme[0-9]n[0-9]){1,2}$')
+...
--- /dev/null
+---
+# This playbook is used to migrate active OSD journals to an SSD.
+#
+# You should define the `osds_journal_devices` variable for each host whose OSD journals are to be migrated.
+#
+# For example in host_vars/hostname1.yml
+#
+# osds_journal_devices:
+# - device_name: /dev/sdd
+# partitions:
+# - index: 1
+# size: 10G
+# osd_id: 0
+# - index: 2
+# size: 10G
+# osd_id: 1
+# - device_name: /dev/sdf
+# partitions:
+# - index: 1
+# size: 10G
+# osd_id: 2
+#
+# @param device_name: The full device path of new ssd.
+# @param partitions: The custom partition layout of ssd.
+# @param index: The index of this partition.
+# @param size: The size of this partition.
+# @param osd_id: Which OSD's journal this partition is for.
+#
+# ansible-playbook migrate-journal-to-ssd.yml
+# The playbook will migrate the OSD journals to the SSD device you defined in host_vars.
+
+- vars:
+ osd_group_name: osds
+ journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
+ osds_journal_devices: []
+ hosts: "{{ osd_group_name }}"
+ serial: 1
+ tasks:
+
+ - name: get osd(s) if directory stat
+ stat:
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
+ register: osds_dir_stat
+ with_subelements:
+ - "{{ osds_journal_devices }}"
+ - partitions
+
+ - name: exit playbook osd(s) is not on this host
+ fail:
+ msg: exit playbook osd(s) is not on this host
+ with_items:
+ osds_dir_stat.results
+ when: osds_dir_stat is defined and item.stat.exists == false
+
+ - name: install sgdisk(gdisk)
+ package:
+ name: gdisk
+ state: present
+ register: result
+ until: result is succeeded
+ when: osds_journal_devices is defined
+
+ - name: generate uuid for osds journal
+ command: uuidgen
+ register: osds
+ with_subelements:
+ - "{{ osds_journal_devices }}"
+ - partitions
+
+ - name: make osd partitions on ssd
+ shell: >
+ sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
+ --typecode={{ item.item[1].index }}:{{ journal_typecode }}
+ --partition-guid={{ item.item[1].index }}:{{ item.stdout }}
+ --mbrtogpt -- {{ item.item[0].device_name }}
+ with_items: "{{ osds.results }}"
+
+ - name: stop osd(s) service
+ service:
+ name: "ceph-osd@{{ item.item[1].osd_id }}"
+ state: stopped
+ with_items: "{{ osds.results }}"
+
+ - name: flush osd(s) journal
+ command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
+ with_items: "{{ osds.results }}"
+ when: osds_journal_devices is defined
+
+ - name: update osd(s) journal soft link
+ command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
+ with_items: "{{ osds.results }}"
+
+ - name: update osd(s) journal uuid
+      shell: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
+ with_items: "{{ osds.results }}"
+
+ - name: initialize osd(s) new journal
+ command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
+ with_items: "{{ osds.results }}"
+
+ - name: start osd(s) service
+ service:
+ name: "ceph-osd@{{ item.item[1].osd_id }}"
+ state: started
+ with_items: "{{ osds.results }}"
--- /dev/null
+---
+# Nukes a multisite config
+- hosts: rgws
+ become: True
+ tasks:
+ - include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml
+
+ handlers:
+ # Ansible 2.1.0 bug will ignore included handlers without this
+ - name: import_tasks roles/ceph-rgw/handlers/main.yml
+ import_tasks: roles/ceph-rgw/handlers/main.yml
--- /dev/null
+---
+# This playbook is used to recover Ceph OSDs after an SSD journal failure.
+# You will also realise that it’s really simple to bring your
+# OSDs back to life after replacing your faulty SSD with a new one.
+#
+# You should define the `dev_ssds` variable for each host whose SSDs were
+# replaced after a failure.
+#
+# For example in host_vars/hostname1.yml
+#
+# dev_ssds:
+# - device_name: /dev/sdd
+# partitions:
+# - index: 1
+# size: 10G
+# osd_id: 0
+# - index: 2
+# size: 10G
+# osd_id: 1
+# - device_name: /dev/sdf
+# partitions:
+# - index: 1
+# size: 10G
+# osd_id: 2
+#
+# @param device_name: The full device path of new ssd
+# @param partitions: The custom partition layout of new ssd
+# @param index: The index of this partition
+# @param size: The size of this partition
+# @param osd_id: Which OSD's journal this partition is for.
+#
+# ansible-playbook recover-osds-after-ssd-journal-failure.yml
+# Prompts for the host to recover; defaults to null, in which case no
+# host is selected and nothing is recovered. Input the hostname of the
+# host on which to recover the OSDs after the SSD journal failure.
+#
+# ansible-playbook -e target_host=hostname \
+# recover-osds-after-ssd-journal-failure.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+- hosts: localhost
+ gather_facts: no
+ vars_prompt:
+ - name: target_host
+ prompt: please enter the target hostname which to recover osds after ssd journal failure
+ private: no
+ tasks:
+ - add_host:
+ name: "{{ target_host }}"
+ groups: dynamically_created_hosts
+
+- hosts: dynamically_created_hosts
+ vars:
+ journal_typecode: 45b0969e-9b03-4f30-b4c6-b4b80ceff106
+ dev_ssds: []
+
+ tasks:
+ - fail: msg="please define dev_ssds variable"
+ when: dev_ssds|length <= 0
+
+ - name: get osd(s) if directory stat
+ stat:
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
+ register: osds_dir_stat
+ with_subelements:
+ - "{{ dev_ssds }}"
+ - partitions
+
+ - name: exit playbook osd(s) is not on this host
+ fail:
+        msg: exiting playbook, the osd(s) are not on this host
+ with_items:
+ osds_dir_stat.results
+ when:
+ - osds_dir_stat is defined | bool
+ - item.stat.exists == false
+
+ - name: install sgdisk(gdisk)
+ package:
+ name: gdisk
+ state: present
+ register: result
+ until: result is succeeded
+
+ - name: get osd(s) journal uuid
+ command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
+ register: osds_uuid
+ with_subelements:
+ - "{{ dev_ssds }}"
+ - partitions
+
+ - name: make partitions on new ssd
+ shell: >
+ sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
+ --typecode={{ item.item[1].index }}:{{ journal_typecode }}
+ --partition-guid={{ item.item[1].index }}:{{ item.stdout }}
+ --mbrtogpt -- {{ item.item[0].device_name }}
+ with_items: "{{ osds_uuid.results }}"
+
+ - name: stop osd(s) service
+ service:
+ name: "ceph-osd@{{ item.item[1].osd_id }}"
+ state: stopped
+ with_items: "{{ osds_uuid.results }}"
+
+ - name: reinitialize osd(s) journal in new ssd
+ command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
+ with_items: "{{ osds_uuid.results }}"
+
+ - name: start osd(s) service
+ service:
+ name: "ceph-osd@{{ item.item[1].osd_id }}"
+ state: started
+ with_items: "{{ osds_uuid.results }}"
--- /dev/null
+---
+# This playbook replaces Ceph OSDs.
+# It can replace any number of OSD(s) from the cluster and destroys ALL THEIR DATA.
+#
+# When disks fail, or if an administrator wants to reprovision OSDs with a new backend,
+# for instance when switching from FileStore to BlueStore, OSDs need to be replaced.
+# Unlike removing an OSD, a replaced OSD's id and CRUSH map entry need to be kept intact after the OSD is destroyed for replacement.
+#
+# Use it like this:
+# ansible-playbook replace-osd.yml -e osd_to_replace=0,2,6
+# Prompts for confirmation to replace, defaults to no and
+# doesn't replace the osd(s). yes replaces the osd(s).
+#
+# ansible-playbook -e ireallymeanit=yes|no replace-osd.yml
+# Overrides the prompt using -e option. Can be used in
+# automation scripts to avoid interactive prompt.
+
+- name: gather facts and check the init system
+
+ hosts:
+ - "{{ mon_group_name|default('mons') }}"
+ - "{{ osd_group_name|default('osds') }}"
+
+ become: True
+ tasks:
+ - debug: msg="gather facts on all Ceph hosts for following reference"
+
+- name: confirm whether user really meant to replace osd(s)
+ hosts: localhost
+ become: true
+ vars_prompt:
+ - name: ireallymeanit
+ prompt: Are you sure you want to replace the osd(s)?
+ default: 'no'
+ private: no
+ vars:
+ mon_group_name: mons
+ osd_group_name: osds
+ pre_tasks:
+ - name: exit playbook, if user did not mean to replace the osd(s)
+ fail:
+        msg: "Exiting replace-osd playbook, no osd(s) was/were replaced.
+          To replace the osd(s), either say 'yes' on the prompt
+          or use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: exit playbook, if no osd(s) was/were given
+ fail:
+ msg: "osd_to_replace must be declared
+ Exiting replace-osd playbook, no OSD(s) was/were replaced.
+ On the command line when invoking the playbook, you can use
+ -e osd_to_replace=0,1,2,3 argument."
+ when: osd_to_replace is not defined
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ post_tasks:
+ - name: set_fact container_exec_cmd build docker exec command (containerized)
+ set_fact:
+ container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
+ when: containerized_deployment | bool
+
+ - name: exit playbook, if can not connect to the cluster
+ command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ register: ceph_health
+ until: ceph_health.stdout.find("HEALTH") > -1
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ retries: 5
+ delay: 2
+
+ - name: find the host(s) where the osd(s) is/are running on
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
+ with_items: "{{ osd_to_replace.split(',') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ register: find_osd_hosts
+
+ - name: set_fact osd_hosts
+ set_fact:
+ osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}"
+ with_items: "{{ find_osd_hosts.results }}"
+
+ - name: check if ceph admin key exists on the osd nodes
+ stat:
+ path: "/etc/ceph/{{ cluster }}.client.admin.keyring"
+ register: ceph_admin_key
+ with_items: "{{ osd_hosts }}"
+ delegate_to: "{{ item }}"
+ failed_when: false
+ when: not containerized_deployment | bool
+
+ - name: fail when admin key is not present
+ fail:
+ msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
+ with_items: "{{ ceph_admin_key.results }}"
+ when:
+ - not containerized_deployment | bool
+ - item.stat.exists == false
+
+ # NOTE(leseb): using '>' is the only way I could have the command working
+ - name: find osd device based on the id
+ shell: >
+ docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
+ {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ list | awk -v pattern=osd.{{ item.1 }} '$0 ~ pattern {print $1}'
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace.split(',') }}"
+ register: osd_to_replace_disks
+ delegate_to: "{{ item.0 }}"
+ when: containerized_deployment | bool
+
+ - name: zapping osd(s) - container
+ shell: >
+ docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
+ {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ zap {{ item.1 }}
+ run_once: true
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace_disks.results }}"
+ delegate_to: "{{ item.0 }}"
+ when: containerized_deployment | bool
+
+ - name: zapping osd(s) - non container
+ command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
+ run_once: true
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace_disks.results }}"
+ delegate_to: "{{ item.0 }}"
+ when: not containerized_deployment | bool
+
+ - name: destroying osd(s)
+ command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
+ run_once: true
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace.split(',') }}"
+ delegate_to: "{{ item.0 }}"
+ when: not containerized_deployment | bool
+
+ - name: replace osd(s) - prepare - non container
+ command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
+ run_once: true
+ delegate_to: "{{ item.0 }}"
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace_disks.results }}"
+ - "{{ osd_to_replace.split(',') }}"
+
+ - name: replace osd(s) - prepare - container
+ shell: >
+ docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
+ {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ prepare {{ item.1 }}
+ run_once: true
+ delegate_to: "{{ item.0 }}"
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace_disks.results }}"
+
+ - name: replace osd(s) - activate - non container
+ command: ceph-disk activate {{ item.1 }}1
+ run_once: true
+ delegate_to: "{{ item.0 }}"
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace_disks.results }}"
+
+ - name: replace osd(s) - activate - container
+ shell: >
+ docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
+ {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ activate {{ item.1 }}1
+ run_once: true
+ delegate_to: "{{ item.0 }}"
+ with_together:
+ - "{{ osd_hosts }}"
+ - "{{ osd_to_replace_disks.results }}"
+
+ - name: show ceph health
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: show ceph osd tree
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
--- /dev/null
+# This file configures logical volume creation for FS journals on NVMe, an NVMe-based bucket index, and HDD-based OSDs.
+# This playbook configures one NVMe device at a time. If your OSD systems contain multiple NVMe devices, you will need to edit the key variables ("nvme_device", "hdd_devices") for each run.
+# It is meant to be used when osd_objectstore=filestore and it outputs the necessary input for group_vars/osds.yml.
+# The LVs for journals are created first, then the LVs for data. Each journal LV corresponds to a data LV.
+#
+## CHANGE THESE VARS ##
+#
+# The NVMe device and the HDD devices must be raw and must not have any GPT, FS, or RAID signatures.
+# GPT, FS, & RAID signatures should be removed from a device prior to running the lv-create.yml playbook.
+# This can be done by running `wipefs -a $device_name`.
+#
+# Leftover signatures can result in ansible errors that say "device $device_name excluded by a filter" after running the lv-create.yml playbook.
+
+# Path of nvme device primed for LV creation for journals and data. Only one NVMe device is allowed at a time. Providing a list will not work in this case.
+nvme_device: dummy
+
+# Paths of the HDD devices designated for LV creation.
+hdd_devices:
+ - /dev/sdd
+ - /dev/sde
+ - /dev/sdf
+ - /dev/sdg
+ - /dev/sdh
+
+# Per the lvol module documentation, "size" and "journal_size" are the sizes of the logical volumes, according to lvcreate(8) --size.
+# They are by default in megabytes, or optionally with one of the [bBsSkKmMgGtTpPeE] unit suffixes; alternatively, according to lvcreate(8) --extents, a percentage of [VG|PVS|FREE]. Float values must begin with a digit.
+# For further reading and examples see: https://docs.ansible.com/ansible/2.6/modules/lvol_module.html
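+#
+# For example:
+#   journal_size: 5500      # 5500 megabytes (default unit)
+#   journal_size: 10G       # 10 gigabytes
+#   hdd_lv_size: 100%FREE   # use all remaining free extents in the volume group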
+
+# Suggested journal size is 5500
+journal_size: 5500
+
+# This var is a list of bucket index LVs created on the NVMe device. We recommend creating one, but you can add others.
+nvme_device_lvs:
+ - lv_name: "ceph-bucket-index-1"
+ size: 100%FREE
+ journal_name: "ceph-journal-bucket-index-1-{{ nvme_device_basename }}"
+
+## TYPICAL USERS WILL NOT NEED TO CHANGE VARS FROM HERE DOWN ##
+
+# the path to where to save the logfile for lv-create.yml
+logfile_path: ./lv-create.log
+
+# all HDDs have to be the same size; the LVs created on them are dedicated to OSD data
+hdd_lv_size: 100%FREE
+
+# Since this playbook can be run multiple times across different devices, {{ var.split('/')[-1] }} is used quite frequently in this playbook.
+# It strips the device name from its path (e.g. sdc from /dev/sdc) to differentiate the names of VGs, journals, and LVs when the prefixes are not changed across multiple runs.
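+# Example: nvme_device "/dev/nvme0n1" yields nvme_device_basename "nvme0n1".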
+nvme_device_basename: "{{ nvme_device.split('/')[-1] }}"
+
+# Only one volume group is created in the playbook for all the LVs on NVMe. This volume group takes up the entire device specified in "nvme_device".
+nvme_vg_name: "ceph-nvme-vg-{{ nvme_device_basename }}"
+
+hdd_vg_prefix: "ceph-hdd-vg"
+hdd_lv_prefix: "ceph-hdd-lv"
+hdd_journal_prefix: "ceph-journal"
+
+# Journals are created on NVMe device
--- /dev/null
+#!/usr/bin/python
+
+# Copyright 2018 Daniel Pivonka <dpivonka@redhat.com>
+# Copyright 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+
+
+from ansible.module_utils.basic import AnsibleModule
+from socket import error as socket_error
+import boto
+import radosgw
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_add_users_buckets
+short_description: bulk create users and buckets
+description:
+ - Bulk create Ceph Object Storage users and buckets
+
+options:
+ rgw_host:
+ description:
+ - a radosgw host in the ceph cluster
+ required: true
+ port:
+ description:
+ - tcp port of the radosgw host
+ required: true
+ is_secure:
+ description:
+ - boolean indicating whether the instance is running over https
+ required: false
+ default: false
+ admin_access_key:
+ description:
+ - radosgw admin user's access key
+ required: true
+ admin_secret_key:
+ description:
+ - radosgw admin user's secret key
+ required: true
+ users:
+ description:
+ - list of users to be created containing sub options
+ required: false
+ sub_options:
+ username:
+ description:
+ - username for new user
+ required: true
+ fullname:
+ description:
+ - fullname for new user
+ required: true
+ email:
+ description:
+ - email for new user
+ required: false
+ maxbucket:
+ description:
+          - maximum number of buckets for the new user
+ required: false
+ default: 1000
+ suspend:
+ description:
+          - suspend the new user upon creation
+ required: false
+ default: false
+ autogenkey:
+ description:
+ - auto generate keys for new user
+ required: false
+ default: true
+ accesskey:
+ description:
+ - access key for new user
+ required: false
+ secretkey:
+ description:
+ - secret key for new user
+ required: false
+ userquota:
+ description:
+ - enable/disable user quota for new user
+ required: false
+ default: false
+ usermaxsize:
+ description:
+ - with user quota enabled specify quota size in kb
+ required: false
+ default: unlimited
+ usermaxobjects:
+ description:
+ - with user quota enabled specify maximum number of objects
+ required: false
+ default: unlimited
+ bucketquota:
+ description:
+ - enable/disable bucket quota for new user
+ required: false
+ default: false
+ bucketmaxsize:
+ description:
+ - with bucket quota enabled specify bucket size in kb
+ required: false
+ default: unlimited
+ bucketmaxobjects:
+ description:
+ - with bucket quota enabled specify maximum number of objects # noqa: E501
+ required: false
+ default: unlimited
+ buckets:
+ description:
+ - list of buckets to be created containing sub options
+ required: false
+ sub_options:
+ bucket:
+ description:
+ - name for new bucket
+ required: true
+ user:
+ description:
+          - user the new bucket will be linked to
+ required: true
+
+
+requirements: ['radosgw', 'boto']
+
+author:
+ - 'Daniel Pivonka'
+
+'''
+
+EXAMPLES = '''
+# single basic user
+- name: single basic user
+ ceph_add_users_buckets:
+ rgw_host: '172.16.0.12'
+ port: 8080
+ admin_access_key: 'N61I8625V4XTWGDTLBLL'
+ admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+ users:
+ - username: 'test1'
+ fullname: 'tester'
+
+
+# single complex user
+- name: single complex user
+ ceph_add_users_buckets:
+ rgw_host: '172.16.0.12'
+ port: 8080
+ admin_access_key: 'N61I8625V4XTWGDTLBLL'
+ admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+ users:
+ - username: 'test1'
+ fullname: 'tester'
+ email: 'dan@email.com'
+ maxbucket: 666
+ suspend: true
+ autogenkey: true
+ accesskey: 'B3AR4Q33L59YV56A9A2F'
+ secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+ userquota: true
+ usermaxsize: '1000'
+ usermaxobjects: 3
+ bucketquota: true
+ bucketmaxsize: '1000'
+ bucketmaxobjects: 3
+
+# multi user
+- name: multi user
+ ceph_add_users_buckets:
+ rgw_host: '172.16.0.12'
+ port: 8080
+ admin_access_key: 'N61I8625V4XTWGDTLBLL'
+ admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+ users:
+ - username: 'test1'
+ fullname: 'tester'
+ email: 'dan@email.com'
+ maxbucket: 666
+ suspend: true
+ autogenkey: true
+ accesskey: 'B3AR4Q33L59YV56A9A2F'
+ secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+ userquota: true
+ usermaxsize: '1000K'
+ usermaxobjects: 3
+ bucketquota: true
+ bucketmaxsize: '1000K'
+ bucketmaxobjects: 3
+ - username: 'test2'
+ fullname: 'tester'
+
+# single bucket
+- name: single basic user
+ ceph_add_users_buckets:
+ rgw_host: '172.16.0.12'
+ port: 8080
+ admin_access_key: 'N61I8625V4XTWGDTLBLL'
+ admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+ buckets:
+ - bucket: 'heyimabucket1'
+ user: 'test1'
+
+# multi bucket
+- name: single basic user
+ ceph_add_users_buckets:
+ rgw_host: '172.16.0.12'
+ port: 8080
+ admin_access_key: 'N61I8625V4XTWGDTLBLL'
+ admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+ buckets:
+ - bucket: 'heyimabucket1'
+ user: 'test1'
+ - bucket: 'heyimabucket2'
+ user: 'test2'
+ - bucket: 'heyimabucket3'
+ user: 'test2'
+
+# buckets and users
+- name: single basic user
+ ceph_add_users_buckets:
+ rgw_host: '172.16.0.12'
+ port: 8080
+ admin_access_key: 'N61I8625V4XTWGDTLBLL'
+ admin_secret_key: 'HZrkuHHO9usUurDWBQHTeLIjO325bIULaC7DxcoV'
+ users:
+ - username: 'test1'
+ fullname: 'tester'
+ email: 'dan@email.com'
+ maxbucket: 666
+ - username: 'test2'
+ fullname: 'tester'
+ email: 'dan1@email.com'
+ accesskey: 'B3AR4Q33L59YV56A9A2F'
+ secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+ userquota: true
+ usermaxsize: '1000'
+ usermaxobjects: 3
+ bucketquota: true
+ bucketmaxsize: '1000'
+ bucketmaxobjects: 3
+ buckets:
+ - bucket: 'heyimabucket1'
+ user: 'test1'
+ - bucket: 'heyimabucket2'
+ user: 'test2'
+ - bucket: 'heyimabucket3'
+ user: 'test2'
+
+'''
+
+RETURN = '''
+error_messages:
+ description: error for failed user or bucket.
+ returned: always
+ type: list
+ sample: [
+ "test2: could not modify user: unable to modify user, cannot add duplicate email\n" # noqa: E501
+ ]
+
+failed_users:
+ description: users that were not created.
+ returned: always
+ type: str
+ sample: "test2"
+
+added_users:
+ description: users that were created.
+ returned: always
+ type: str
+ sample: "test1"
+
+failed_buckets:
+ description: buckets that were not created.
+ returned: always
+ type: str
+ sample: "heyimabucket3"
+
+added_buckets:
+ description: buckets that were created.
+ returned: always
+ type: str
+ sample: "heyimabucket1, heyimabucket2"
+
+'''
+
+
+def create_users(rgw, users, result):
+
+ added_users = []
+ failed_users = []
+
+ for user in users:
+
+ # get info
+ username = user['username']
+ fullname = user['fullname']
+ email = user['email']
+ maxbucket = user['maxbucket']
+ suspend = user['suspend']
+ autogenkey = user['autogenkey']
+ accesskey = user['accesskey']
+ secretkey = user['secretkey']
+ userquota = user['userquota']
+ usermaxsize = user['usermaxsize']
+ usermaxobjects = user['usermaxobjects']
+ bucketquota = user['bucketquota']
+ bucketmaxsize = user['bucketmaxsize']
+ bucketmaxobjects = user['bucketmaxobjects']
+
+ fail_flag = False
+
+ # check if user exists
+ try:
+ user_info = rgw.get_user(uid=username)
+ except radosgw.exception.RadosGWAdminError:
+            # it doesn't exist
+ user_info = None
+
+        # user already exists, cannot create it
+ if user_info:
+ result['error_messages'].append(username + ' UserExists')
+ failed_users.append(username)
+ else:
+            # user doesn't exist, create it
+ if email:
+ if autogenkey:
+ try:
+ rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501
+ generate_key=autogenkey,
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
+ except radosgw.exception.RadosGWAdminError as e:
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
+ fail_flag = True
+ else:
+ try:
+ rgw.create_user(username, fullname, email=email, key_type='s3', # noqa: E501
+ access_key=accesskey, secret_key=secretkey, # noqa: E501
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
+ except radosgw.exception.RadosGWAdminError as e:
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
+ fail_flag = True
+ else:
+ if autogenkey:
+ try:
+ rgw.create_user(username, fullname, key_type='s3',
+ generate_key=autogenkey,
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
+ except radosgw.exception.RadosGWAdminError as e:
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
+ fail_flag = True
+ else:
+ try:
+ rgw.create_user(username, fullname, key_type='s3',
+ access_key=accesskey, secret_key=secretkey, # noqa: E501
+ max_buckets=maxbucket, suspended=suspend) # noqa: E501
+ except radosgw.exception.RadosGWAdminError as e:
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
+ fail_flag = True
+
+ if not fail_flag and userquota:
+ try:
+ rgw.set_quota(username, 'user', max_objects=usermaxobjects,
+ max_size_kb=usermaxsize, enabled=True)
+ except radosgw.exception.RadosGWAdminError as e:
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
+ fail_flag = True
+
+ if not fail_flag and bucketquota:
+ try:
+ rgw.set_quota(username, 'bucket', max_objects=bucketmaxobjects, # noqa: E501
+ max_size_kb=bucketmaxsize, enabled=True)
+ except radosgw.exception.RadosGWAdminError as e:
+ result['error_messages'].append(username + ' ' + e.get_code()) # noqa: E501
+ fail_flag = True
+
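+            # creation partially failed: remove the user so a re-run starts from a clean state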
+ if fail_flag:
+ try:
+ rgw.delete_user(username)
+ except radosgw.exception.RadosGWAdminError:
+ pass
+ failed_users.append(username)
+ else:
+ added_users.append(username)
+
+ result['added_users'] = ", ".join(added_users)
+ result['failed_users'] = ", ".join(failed_users)
+
+
+def create_buckets(rgw, buckets, result):
+
+ added_buckets = []
+ failed_buckets = []
+
+ for bucket_info in buckets:
+ bucket = bucket_info['bucket']
+ user = bucket_info['user']
+
+ # check if bucket exists
+ try:
+ bucket_info = rgw.get_bucket(bucket_name=bucket)
+ except TypeError:
+            # it doesn't exist
+ bucket_info = None
+
+ # if it exists add to failed list
+ if bucket_info:
+ failed_buckets.append(bucket)
+ result['error_messages'].append(bucket + ' BucketExists')
+ else:
+ # bucket doesn't exist, so we need to create it
+ bucket_info = create_bucket(rgw, bucket)
+ if bucket_info:
+ # bucket created ok, link to user
+
+ # check if user exists
+ try:
+ user_info = rgw.get_user(uid=user)
+ except radosgw.exception.RadosGWAdminError:
+                    # it doesn't exist
+ user_info = None
+
+ # user exists, link
+ if user_info:
+ try:
+ rgw.link_bucket(bucket_name=bucket,
+ bucket_id=bucket_info.id,
+ uid=user)
+ added_buckets.append(bucket)
+ except radosgw.exception.RadosGWAdminError as e:
+ result['error_messages'].append(bucket + e.get_code())
+ try:
+ rgw.delete_bucket(bucket, purge_objects=True)
+ except radosgw.exception.RadosGWAdminError:
+ pass
+ failed_buckets.append(bucket)
+
+ else:
+                    # user doesn't exist, the bucket can't be linked, delete it
+ try:
+ rgw.delete_bucket(bucket, purge_objects=True)
+ except radosgw.exception.RadosGWAdminError:
+ pass
+ failed_buckets.append(bucket)
+ result['error_messages'].append(bucket + ' could not be linked' + ', NoSuchUser ' + user) # noqa: E501
+
+ else:
+ # something went wrong
+ failed_buckets.append(bucket)
+ result['error_messages'].append(bucket + ' could not be created') # noqa: E501
+
+ result['added_buckets'] = ", ".join(added_buckets)
+ result['failed_buckets'] = ", ".join(failed_buckets)
+
+
+def create_bucket(rgw, bucket):
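+    # buckets are created through the S3 API (boto) with the admin credentials,
+    # then looked up through the radosgw admin API to return their metadata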
+ conn = boto.connect_s3(aws_access_key_id=rgw.provider._access_key,
+ aws_secret_access_key=rgw.provider._secret_key,
+ host=rgw._connection[0],
+ port=rgw.port,
+ is_secure=rgw.is_secure,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(), # noqa: E501
+ )
+
+ try:
+ conn.create_bucket(bucket_name=bucket)
+ bucket_info = rgw.get_bucket(bucket_name=bucket)
+ except boto.exception.S3ResponseError:
+ return None
+ else:
+ return bucket_info
+
+
+def main():
+ # arguments/parameters that a user can pass to the module
+ fields = dict(rgw_host=dict(type='str', required=True),
+ port=dict(type='int', required=True),
+ is_secure=dict(type='bool',
+ required=False,
+ default=False),
+ admin_access_key=dict(type='str', required=True),
+ admin_secret_key=dict(type='str', required=True),
+ buckets=dict(type='list', required=False, elements='dict',
+ options=dict(bucket=dict(type='str', required=True), # noqa: E501
+ user=dict(type='str', required=True))), # noqa: E501
+ users=dict(type='list', required=False, elements='dict',
+ options=dict(username=dict(type='str', required=True), # noqa: E501
+ fullname=dict(type='str', required=True), # noqa: E501
+ email=dict(type='str', required=False), # noqa: E501
+ maxbucket=dict(type='int', required=False, default=1000), # noqa: E501
+ suspend=dict(type='bool', required=False, default=False), # noqa: E501
+ autogenkey=dict(type='bool', required=False, default=True), # noqa: E501
+ accesskey=dict(type='str', required=False), # noqa: E501
+ secretkey=dict(type='str', required=False), # noqa: E501
+ userquota=dict(type='bool', required=False, default=False), # noqa: E501
+ usermaxsize=dict(type='str', required=False, default='-1'), # noqa: E501
+ usermaxobjects=dict(type='int', required=False, default=-1), # noqa: E501
+ bucketquota=dict(type='bool', required=False, default=False), # noqa: E501
+ bucketmaxsize=dict(type='str', required=False, default='-1'), # noqa: E501
+ bucketmaxobjects=dict(type='int', required=False, default=-1)))) # noqa: E501
+
+ # the AnsibleModule object
+ module = AnsibleModule(argument_spec=fields,
+ supports_check_mode=False)
+
+ # get vars
+ rgw_host = module.params.get('rgw_host')
+ port = module.params.get('port')
+ is_secure = module.params.get('is_secure')
+ admin_access_key = module.params.get('admin_access_key')
+ admin_secret_key = module.params.get('admin_secret_key')
+ users = module.params['users']
+ buckets = module.params.get('buckets')
+
+ # seed the result dict in the object
+ result = dict(
+ changed=False,
+ error_messages=[],
+ added_users='',
+ failed_users='',
+ added_buckets='',
+ failed_buckets='',
+ )
+
+ # radosgw connection
+ rgw = radosgw.connection.RadosGWAdminConnection(host=rgw_host,
+ port=port,
+ access_key=admin_access_key, # noqa: E501
+ secret_key=admin_secret_key, # noqa: E501
+ aws_signature='AWS4',
+ is_secure=is_secure)
+
+ # test connection
+ connected = True
+ try:
+ rgw.get_usage()
+ except radosgw.exception.RadosGWAdminError as e:
+ connected = False
+ result['error_messages'] = e.get_code()
+ except socket_error as e:
+ connected = False
+ result['error_messages'] = str(e)
+
+ if connected and users:
+ create_users(rgw, users, result)
+
+ if connected and buckets:
+ create_buckets(rgw, buckets, result)
+
+ if result['added_users'] != '' or result['added_buckets'] != '':
+ result['changed'] = True
+
+ # conditional state caused a failure
+ if result['added_users'] == '' and result['added_buckets'] == '':
+ module.fail_json(msg='No users or buckets were added successfully',
+ **result)
+
+ # EXIT
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+#!/usr/bin/python
+
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import fatal
+except ImportError:
+ from module_utils.ca_common import fatal
+import datetime
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_crush
+
+author: Sebastien Han <seb@redhat.com>
+
+short_description: Create Ceph CRUSH hierarchy
+
+version_added: "2.6"
+
+description:
+ - By using the hostvar variable 'osd_crush_location'
+ ceph_crush creates buckets and places them in the right CRUSH hierarchy
+
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ location:
+ description:
+ - osd_crush_location dict from the inventory file. It contains
+ the placement of each host in the CRUSH map.
+ required: true
+ containerized:
+ description:
+            - Whether or not this is a containerized cluster. The value is
+ assigned or not depending on how the playbook runs.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+- name: configure crush hierarchy
+ ceph_crush:
+ cluster: "{{ cluster }}"
+ location: "{{ hostvars[item]['osd_crush_location'] }}"
+ containerized: "{{ container_exec_cmd }}"
+ with_items: "{{ groups[osd_group_name] }}"
+ when: crush_rule_config | bool
+'''
+
+RETURN = '''# '''
+
+
+def generate_cmd(cluster, subcommand, bucket, bucket_type, containerized=None):
+ '''
+ Generate command line to execute
+ '''
+ cmd = [
+ 'ceph',
+ '--cluster',
+ cluster,
+ 'osd',
+ 'crush',
+ subcommand,
+ bucket,
+ bucket_type,
+ ]
+ if containerized:
+ cmd = containerized.split() + cmd
+ return cmd
+
+
+def sort_osd_crush_location(location, module):
+ '''
+ Sort location tuple
+ '''
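+    # e.g. (('root', 'default'), ('rack', 'r1'), ('host', 'osd0')) is returned
+    # as [('host', 'osd0'), ('rack', 'r1'), ('root', 'default')]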
+ if len(location) < 2:
+ fatal("You must specify at least 2 buckets.", module)
+
+ if not any(item for item in location if item[0] == "host"):
+ fatal("You must specify a 'host' bucket.", module)
+
+ try:
+ crush_bucket_types = [
+ "host",
+ "chassis",
+ "rack",
+ "row",
+ "pdu",
+ "pod",
+ "room",
+ "datacenter",
+ "region",
+ "root",
+ ]
+ return sorted(location, key=lambda crush: crush_bucket_types.index(crush[0])) # noqa: E501
+ except ValueError as error:
+ fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], crush_bucket_types), module) # noqa: E501
+
+
+def create_and_move_buckets_list(cluster, location, containerized=None):
+ '''
+    Create Ceph CRUSH buckets and arrange the hierarchy
+ '''
+ previous_bucket = None
+ cmd_list = []
+ for item in location:
+ bucket_type, bucket_name = item
+ # ceph osd crush add-bucket maroot root
+ cmd_list.append(generate_cmd(cluster, "add-bucket", bucket_name, bucket_type, containerized)) # noqa: E501
+ if previous_bucket:
+ # ceph osd crush move monrack root=maroot
+ cmd_list.append(generate_cmd(cluster, "move", previous_bucket, "%s=%s" % (bucket_type, bucket_name), containerized)) # noqa: E501
+ previous_bucket = item[1]
+ return cmd_list
+
+
+def exec_commands(module, cmd_list):
+ '''
+    Execute the Ceph commands
+ '''
+ for cmd in cmd_list:
+ rc, out, err = module.run_command(cmd)
+ return rc, cmd, out, err
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ location=dict(type='dict', required=True),
+        containerized=dict(type='str', required=False, default=None),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ cluster = module.params['cluster']
+ location_dict = module.params['location']
+ location = sort_osd_crush_location(tuple(location_dict.items()), module)
+ containerized = module.params['containerized']
+
+ result = dict(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ startd = datetime.datetime.now()
+
+ # run the Ceph command to add buckets
+ rc, cmd, out, err = exec_commands(module, create_and_move_buckets_list(cluster, location, containerized)) # noqa: E501
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=True,
+ )
+
+ if rc != 0:
+ module.fail_json(msg='non-zero return code', **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module, \
+ generate_ceph_cmd, \
+ is_containerized, \
+ exec_command
+except ImportError:
+ from module_utils.ca_common import exit_module, \
+ generate_ceph_cmd, \
+ is_containerized, \
+ exec_command
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_crush_rule
+short_description: Manage Ceph Crush Replicated/Erasure Rule
+version_added: "2.8"
+description:
+ - Manage Ceph Crush rule(s) creation, deletion and updates.
+options:
+ name:
+ description:
+ - name of the Ceph Crush rule.
+ required: true
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ state:
+ description:
+ If 'present' is used, the module creates a rule if it doesn't
+            exist or updates it if it already exists.
+ If 'absent' is used, the module will simply delete the rule.
+ If 'info' is used, the module will return all details about the
+ existing rule (json formatted).
+ required: false
+ choices: ['present', 'absent', 'info']
+ default: present
+ rule_type:
+ description:
+ - The ceph CRUSH rule type.
+ required: false
+ choices: ['replicated', 'erasure']
+ bucket_root:
+ description:
+ - The ceph bucket root for replicated rule.
+ required: false
+ bucket_type:
+ description:
+ - The ceph bucket type for replicated rule.
+ required: false
+ choices: ['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod',
+ 'room', 'datacenter', 'zone', 'region', 'root']
+ device_class:
+ description:
+ - The ceph device class for replicated rule.
+ required: false
+ profile:
+ description:
+ - The ceph erasure profile for erasure rule.
+ required: false
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create a Ceph Crush replicated rule
+ ceph_crush_rule:
+ name: foo
+ bucket_root: default
+ bucket_type: host
+ device_class: ssd
+ rule_type: replicated
+
+- name: create a Ceph Crush erasure rule
+ ceph_crush_rule:
+ name: foo
+ profile: bar
+ rule_type: erasure
+
+- name: get a Ceph Crush rule information
+ ceph_crush_rule:
+ name: foo
+ state: info
+
+- name: delete a Ceph Crush rule
+ ceph_crush_rule:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def create_rule(module, container_image=None):
+ '''
+ Create a new crush replicated/erasure rule
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ rule_type = module.params.get('rule_type')
+ bucket_root = module.params.get('bucket_root')
+ bucket_type = module.params.get('bucket_type')
+ device_class = module.params.get('device_class')
+ profile = module.params.get('profile')
+
+ if rule_type == 'replicated':
+ args = ['create-replicated', name, bucket_root, bucket_type]
+ if device_class:
+ args.append(device_class)
+ else:
+ args = ['create-erasure', name]
+ if profile:
+ args.append(profile)
+
+ cmd = generate_ceph_cmd(['osd', 'crush', 'rule'],
+ args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_rule(module, container_image=None):
+ '''
+ Get existing crush rule
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['dump', name, '--format=json']
+
+ cmd = generate_ceph_cmd(['osd', 'crush', 'rule'],
+ args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def remove_rule(module, container_image=None):
+ '''
+ Remove a crush rule
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['rm', name]
+
+ cmd = generate_ceph_cmd(['osd', 'crush', 'rule'],
+ args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ cluster=dict(type='str', required=False, default='ceph'),
+ state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501
+ rule_type=dict(type='str', required=False, choices=['replicated', 'erasure']), # noqa: E501
+ bucket_root=dict(type='str', required=False),
+ bucket_type=dict(type='str', required=False, choices=['osd', 'host', 'chassis', 'rack', 'row', 'pdu', 'pod', # noqa: E501
+ 'room', 'datacenter', 'zone', 'region', 'root']), # noqa: E501
+ device_class=dict(type='str', required=False),
+ profile=dict(type='str', required=False)
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['rule_type']),
+ ('rule_type', 'replicated', ['bucket_root', 'bucket_type']),
+ ('rule_type', 'erasure', ['profile'])
+ ]
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ state = module.params.get('state')
+ rule_type = module.params.get('rule_type')
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_command(module, get_rule(module, container_image=container_image)) # noqa: E501
+ if rc != 0:
+ rc, cmd, out, err = exec_command(module, create_rule(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rule = json.loads(out)
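+            # a CRUSH rule 'type' of 1 is replicated and 3 is erasure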
+ if (rule['type'] == 1 and rule_type == 'erasure') or (rule['type'] == 3 and rule_type == 'replicated'): # noqa: E501
+ module.fail_json(msg="Can not convert crush rule {} to {}".format(name, rule_type), changed=False, rc=1) # noqa: E501
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_command(module, get_rule(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_command(module, remove_rule(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc = 0
+ out = "Crush Rule {} doesn't exist".format(name)
+
+ elif state == "info":
+ rc, cmd, out, err = exec_command(module, get_rule(module, container_image=container_image)) # noqa: E501
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import generate_ceph_cmd, \
+ is_containerized, \
+ exec_command, \
+ exit_module, \
+ fatal
+except ImportError:
+ from module_utils.ca_common import generate_ceph_cmd, is_containerized, exec_command, exit_module, fatal # noqa: E501
+
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_dashboard_user
+
+short_description: Manage Ceph Dashboard User
+
+version_added: "2.8"
+
+description:
+ - Manage Ceph Dashboard user(s) creation, deletion and updates.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the Ceph Dashboard user.
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a user if it doesn't
+            exist or updates it if it already exists.
+ If 'absent' is used, the module will simply delete the user.
+ If 'info' is used, the module will return all details about the
+ existing user (json formatted).
+ required: false
+ choices: ['present', 'absent', 'info']
+ default: present
+ password:
+ description:
+ - password of the Ceph Dashboard user.
+ required: false
+ roles:
+ description:
+ - roles of the Ceph Dashboard user.
+ required: false
+ default: []
+
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create a Ceph Dashboard user
+ ceph_dashboard_user:
+ name: foo
+ password: bar
+
+- name: create a read-only/block-manager Ceph Dashboard user
+ ceph_dashboard_user:
+ name: foo
+ password: bar
+ roles:
+ - 'read-only'
+ - 'block-manager'
+
+- name: create a Ceph Dashboard admin user
+ ceph_dashboard_user:
+ name: foo
+ password: bar
+ roles: ['administrator']
+
+- name: get a Ceph Dashboard user information
+ ceph_dashboard_user:
+ name: foo
+ state: info
+
+- name: delete a Ceph Dashboard user
+ ceph_dashboard_user:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def create_user(module, container_image=None):
+ '''
+ Create a new user
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
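+    # the password is read from stdin ('-i -') so it is not exposed on the command line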
+ args = ['ac-user-create', '-i', '-', name]
+
+ cmd = generate_ceph_cmd(sub_cmd=['dashboard'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image,
+ interactive=True)
+
+ return cmd
+
+
+def set_roles(module, container_image=None):
+ '''
+ Set user roles
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ roles = module.params.get('roles')
+
+ args = ['ac-user-set-roles', name]
+
+ args.extend(roles)
+
+ cmd = generate_ceph_cmd(sub_cmd=['dashboard'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def set_password(module, container_image=None):
+ '''
+ Set user password
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['ac-user-set-password', '-i', '-', name]
+
+ cmd = generate_ceph_cmd(sub_cmd=['dashboard'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image,
+ interactive=True)
+
+ return cmd
+
+
+def get_user(module, container_image=None):
+ '''
+ Get existing user
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['ac-user-show', name, '--format=json']
+
+ cmd = generate_ceph_cmd(sub_cmd=['dashboard'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def remove_user(module, container_image=None):
+ '''
+ Remove a user
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['ac-user-delete', name]
+
+ cmd = generate_ceph_cmd(sub_cmd=['dashboard'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501
+ password=dict(type='str', required=False, no_log=True),
+ roles=dict(type='list',
+ required=False,
+ choices=['administrator', 'read-only', 'block-manager', 'rgw-manager', 'cluster-manager', 'pool-manager', 'cephfs-manager'], # noqa: E501
+ default=[]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['password']]]
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ state = module.params.get('state')
+ roles = module.params.get('roles')
+ password = module.params.get('password')
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ user = json.loads(out)
+ user['roles'].sort()
+ roles.sort()
+ if user['roles'] != roles:
+ rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image)) # noqa: E501
+ changed = True
+ rc, cmd, out, err = exec_command(module, set_password(module, container_image=container_image), stdin=password) # noqa: E501
+ else:
+ rc, cmd, out, err = exec_command(module, create_user(module, container_image=container_image), stdin=password) # noqa: E501
+ if rc != 0:
+ fatal(err, module)
+ rc, cmd, out, err = exec_command(module, set_roles(module, container_image=container_image)) # noqa: E501
+ changed = True
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_command(module, remove_user(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc = 0
+ out = "Dashboard User {} doesn't exist".format(name)
+
+ elif state == "info":
+ rc, cmd, out, err = exec_command(module, get_user(module, container_image=container_image)) # noqa: E501
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import is_containerized, \
+ generate_ceph_cmd, \
+ exec_command, \
+ exit_module
+except ImportError:
+ from module_utils.ca_common import is_containerized, \
+ generate_ceph_cmd, \
+ exec_command, \
+ exit_module
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_ec_profile
+
+short_description: Manage Ceph Erasure Code profile
+
+version_added: "2.8"
+
+description:
+ - Manage Ceph Erasure Code profile
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the profile.
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a profile.
+ If 'absent' is used, the module will delete the profile.
+ required: false
+        choices: ['present', 'absent']
+ default: present
+ stripe_unit:
+ description:
+ - The amount of data in a data chunk, per stripe.
+ required: false
+ k:
+ description:
+ - Number of data-chunks the object will be split in
+ required: true
+ m:
+ description:
+ - Compute coding chunks for each object and store them on different
+ OSDs.
+ required: true
+ crush_root:
+ description:
+ - The name of the crush bucket used for the first step of the CRUSH
+ rule.
+ required: false
+ crush_device_class:
+ description:
+ - Restrict placement to devices of a specific class (hdd/ssd)
+ required: false
+
+author:
+ - Guillaume Abrioux <gabrioux@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create an erasure code profile
+ ceph_ec_profile:
+ name: foo
+ k: 4
+ m: 2
+
+- name: delete an erasure code profile
+ ceph_ec_profile:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def get_profile(module, name, cluster='ceph', container_image=None):
+ '''
+ Get existing profile
+ '''
+
+ args = ['get', name, '--format=json']
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'erasure-code-profile'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def create_profile(module, name, k, m, stripe_unit, cluster='ceph', force=False, container_image=None): # noqa: E501
+ '''
+ Create a profile
+ '''
+
+ args = ['set', name, 'k={}'.format(k), 'm={}'.format(m)]
+ if stripe_unit:
+ args.append('stripe_unit={}'.format(stripe_unit))
+ if force:
+ args.append('--force')
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'erasure-code-profile'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def delete_profile(module, name, cluster='ceph', container_image=None):
+ '''
+ Delete a profile
+ '''
+
+ args = ['rm', name]
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'erasure-code-profile'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False,
+ choices=['present', 'absent'], default='present'),
+ stripe_unit=dict(type='str', required=False),
+ k=dict(type='str', required=False),
+ m=dict(type='str', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['k', 'm']]],
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ cluster = module.params.get('cluster')
+ state = module.params.get('state')
+ stripe_unit = module.params.get('stripe_unit')
+ k = module.params.get('k')
+ m = module.params.get('m')
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_command(module, get_profile(module, name, cluster, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ # the profile already exists, let's check whether we have to
+ # update it
+ current_profile = json.loads(out)
+ if current_profile['k'] != k or \
+ current_profile['m'] != m or \
+ current_profile.get('stripe_unit', stripe_unit) != stripe_unit:
+ rc, cmd, out, err = exec_command(module,
+ create_profile(module,
+ name,
+ k,
+ m,
+ stripe_unit,
+ cluster,
+ force=True, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ # the profile doesn't exist, it has to be created
+ rc, cmd, out, err = exec_command(module, create_profile(module,
+ name,
+ k,
+ m,
+ stripe_unit, # noqa: E501
+ cluster,
+ container_image=container_image)) # noqa: E501
+ if rc == 0:
+ changed = True
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_command(module, delete_profile(module, name, cluster, container_image=container_image)) # noqa: E501
+ if not err:
+ out = 'Profile {} removed.'.format(name)
+ changed = True
+ else:
+ rc = 0
+ out = "Skipping, the profile {} doesn't exist".format(name)
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import is_containerized, \
+ exec_command, \
+ generate_ceph_cmd, \
+ exit_module
+except ImportError:
+ from module_utils.ca_common import is_containerized, \
+ exec_command, \
+ generate_ceph_cmd, \
+ exit_module
+
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_fs
+
+short_description: Manage Ceph File System
+
+version_added: "2.8"
+
+description:
+ - Manage Ceph File System(s) creation, deletion and updates.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the Ceph File System.
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a filesystem if it
+            doesn't exist or updates it if it already exists.
+ If 'absent' is used, the module will simply delete the filesystem.
+ If 'info' is used, the module will return all details about the
+ existing filesystem (json formatted).
+ required: false
+ choices: ['present', 'absent', 'info']
+ default: present
+ data:
+ description:
+ - name of the data pool.
+ required: false
+ metadata:
+ description:
+ - name of the metadata pool.
+ required: false
+ max_mds:
+ description:
+            - value of the max_mds attribute (maximum number of active MDS daemons).
+ required: false
+
+
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create a Ceph File System
+ ceph_fs:
+ name: foo
+ data: bar_data
+ metadata: bar_metadata
+ max_mds: 2
+
+- name: get a Ceph File System information
+ ceph_fs:
+ name: foo
+ state: info
+
+- name: delete a Ceph File System
+ ceph_fs:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def create_fs(module, container_image=None):
+ '''
+ Create a new fs
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ data = module.params.get('data')
+ metadata = module.params.get('metadata')
+
+ args = ['new', name, metadata, data]
+
+ cmd = generate_ceph_cmd(sub_cmd=['fs'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_fs(module, container_image=None):
+ '''
+ Get existing fs
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['get', name, '--format=json']
+
+ cmd = generate_ceph_cmd(sub_cmd=['fs'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def remove_fs(module, container_image=None):
+ '''
+ Remove a fs
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['rm', name, '--yes-i-really-mean-it']
+
+ cmd = generate_ceph_cmd(sub_cmd=['fs'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def fail_fs(module, container_image=None):
+ '''
+ Fail a fs
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['fail', name]
+
+ cmd = generate_ceph_cmd(sub_cmd=['fs'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def set_fs(module, container_image=None):
+ '''
+ Set parameter to a fs
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ max_mds = module.params.get('max_mds')
+
+ args = ['set', name, 'max_mds', str(max_mds)]
+
+ cmd = generate_ceph_cmd(sub_cmd=['fs'],
+ args=args,
+ cluster=cluster,
+ container_image=container_image)
+
+ return cmd
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501
+ data=dict(type='str', required=False),
+ metadata=dict(type='str', required=False),
+ max_mds=dict(type='int', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_if=[['state', 'present', ['data', 'metadata']]],
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ state = module.params.get('state')
+ max_mds = module.params.get('max_mds')
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ fs = json.loads(out)
+ if max_mds and fs["mdsmap"]["max_mds"] != max_mds:
+ rc, cmd, out, err = exec_command(module, set_fs(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ changed = True
+ else:
+ rc, cmd, out, err = exec_command(module, create_fs(module, container_image=container_image)) # noqa: E501
+ if max_mds and max_mds > 1:
+ exec_command(module, set_fs(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ changed = True
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ exec_command(module, fail_fs(module, container_image=container_image)) # noqa: E501
+ rc, cmd, out, err = exec_command(module, remove_fs(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ changed = True
+ else:
+ rc = 0
+ out = "Ceph File System {} doesn't exist".format(name)
+
+ elif state == "info":
+ rc, cmd, out, err = exec_command(module, get_fs(module, container_image=container_image)) # noqa: E501
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+#!/usr/bin/python3
+
+# Copyright 2018, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import is_containerized, container_exec, fatal
+except ImportError:
+ from module_utils.ca_common import is_containerized, container_exec, fatal
+import datetime
+import json
+import os
+import struct
+import time
+import base64
+import socket
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_key
+
+author: Sebastien Han <seb@redhat.com>
+
+short_description: Manage Cephx key(s)
+
+version_added: "2.6"
+
+description:
+    - Manage CephX key creation, deletion and updates.
+ It can also list and get information about keyring(s).
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the CephX key
+ required: true
+ user:
+ description:
+ - entity used to perform operation.
+ It corresponds to the -n option (--name)
+ required: false
+ user_key:
+ description:
+ - the path to the keyring corresponding to the
+ user being used.
+ It corresponds to the -k option (--keyring)
+ state:
+ description:
+ - If 'present' is used, the module creates a keyring
+ with the associated capabilities.
+ If 'present' is used and a secret is provided the module
+            will always add the key, which means it will update
+            the keyring if the secret changes; the same goes for
+ the capabilities.
+ If 'absent' is used, the module will simply delete the keyring.
+ If 'list' is used, the module will list all the keys and will
+ return a json output.
+ If 'info' is used, the module will return in a json format the
+ description of a given keyring.
+            If 'generate_secret' is used, the module will simply output a cephx secret.
+ required: false
+ choices: ['present', 'update', 'absent', 'list', 'info', 'fetch_initial_keys', 'generate_secret']
+ default: present
+ caps:
+ description:
+ - CephX key capabilities
+ default: None
+ required: false
+ secret:
+ description:
+ - keyring's secret value
+ required: false
+ default: None
+ import_key:
+ description:
+            - Whether or not to import the created keyring into Ceph.
+ This can be useful for someone that only wants to generate keyrings
+ but not add them into Ceph.
+ required: false
+ default: True
+ dest:
+ description:
+            - Destination to write the keyring, can be a file or a directory
+ required: false
+ default: /etc/ceph/
+ fetch_initial_keys:
+ description:
+ - Fetch client.admin and bootstrap key.
+ This is only needed for Nautilus and above.
+ Writes down to the filesystem the initial keys generated by the monitor. # noqa: E501
+ This command can ONLY run from a monitor node.
+ required: false
+ default: false
+ output_format:
+ description:
+ - The key output format when retrieving the information of an
+ entity.
+ required: false
+ default: json
+'''
+
+EXAMPLES = '''
+
+keys_to_create:
+ - { name: client.key, key: "AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==", caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" } # noqa: E501
+ - { name: client.cle, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" } # noqa: E501
+
+caps:
+ mon: "allow rwx"
+ mds: "allow *"
+
+- name: create ceph admin key
+ ceph_key:
+ name: client.admin
+ state: present
+ secret: AQAin8tU2DsKFBAAFIAzVTzkL3+gtAjjpQiomw==
+ caps:
+ mon: allow *
+ osd: allow *
+ mgr: allow *
+ mds: allow
+ mode: 0400
+ import_key: False
+
+- name: create monitor initial keyring
+ ceph_key:
+ name: mon.
+ state: present
+ secret: AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==
+ caps:
+ mon: allow *
+ dest: "/var/lib/ceph/tmp/"
+ import_key: False
+
+- name: create cephx key
+ ceph_key:
+ name: "{{ keys_to_create }}"
+ user: client.bootstrap-rgw
+ user_key: /var/lib/ceph/bootstrap-rgw/ceph.keyring
+ state: present
+ caps: "{{ caps }}"
+
+- name: create cephx key but don't import it in Ceph
+ ceph_key:
+ name: "{{ keys_to_create }}"
+ state: present
+ caps: "{{ caps }}"
+ import_key: False
+
+- name: delete cephx key
+ ceph_key:
+ name: "my_key"
+ state: absent
+
+- name: info cephx key
+ ceph_key:
+ name: "my_key""
+ state: info
+
+- name: info cephx admin key (plain)
+ ceph_key:
+ name: client.admin
+ output_format: plain
+ state: info
+ register: client_admin_key
+
+- name: list cephx keys
+ ceph_key:
+ state: list
+
+- name: fetch cephx keys
+ ceph_key:
+ state: fetch_initial_keys
+'''
+
+RETURN = '''# '''
+
+
+CEPH_INITIAL_KEYS = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa: E501
+ 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa: E501
+
+
+def str_to_bool(val):
+ try:
+ val = val.lower()
+ except AttributeError:
+ val = str(val).lower()
+ if val == 'true':
+ return True
+ elif val == 'false':
+ return False
+ else:
+ raise ValueError("Invalid input value: %s" % val)
+
+
+def generate_secret():
+ '''
+ Generate a CephX secret
+ '''
+
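+    # a cephx secret is a 12-byte header (key type 1, creation time in seconds
+    # and nanoseconds, key length) followed by 16 random bytes, base64 encoded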
+ key = os.urandom(16)
+ header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
+ secret = base64.b64encode(header + key)
+
+ return secret
+
+
+def generate_caps(_type, caps):
+ '''
+ Generate CephX capabilities list
+ '''
+
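+    # e.g. {'mon': 'allow r'} yields ['--cap', 'mon', 'allow r'] for ceph-authtool
+    # and ['mon', 'allow r'] for the ceph CLI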
+ caps_cli = []
+
+ for k, v in caps.items():
+ # makes sure someone didn't pass an empty var,
+ # we don't want to add an empty cap
+ if len(k) == 0:
+ continue
+ if _type == "ceph-authtool":
+ caps_cli.extend(["--cap"])
+ caps_cli.extend([k, v])
+
+ return caps_cli
+
+
+def generate_ceph_cmd(cluster, args, user, user_key_path, container_image=None):
+ '''
+ Generate 'ceph' command line to execute
+ '''
+
+ if container_image:
+ binary = 'ceph'
+ cmd = container_exec(
+ binary, container_image)
+ else:
+ binary = ['ceph']
+ cmd = binary
+
+ base_cmd = [
+ '-n',
+ user,
+ '-k',
+ user_key_path,
+ '--cluster',
+ cluster,
+ 'auth',
+ ]
+
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def generate_ceph_authtool_cmd(cluster, name, secret, caps, dest, container_image=None): # noqa: E501
+ '''
+ Generate 'ceph-authtool' command line to execute
+ '''
+
+ if container_image:
+ binary = 'ceph-authtool'
+ cmd = container_exec(
+ binary, container_image)
+ else:
+ binary = ['ceph-authtool']
+ cmd = binary
+
+ base_cmd = [
+ '--create-keyring',
+ dest,
+ '--name',
+ name,
+ '--add-key',
+ secret,
+ ]
+
+ cmd.extend(base_cmd)
+ cmd.extend(generate_caps("ceph-authtool", caps))
+
+ return cmd
+
+
+def create_key(module, result, cluster, user, user_key_path, name, secret, caps, import_key, dest, container_image=None): # noqa: E501
+ '''
+ Create a CephX key
+ '''
+
+ cmd_list = []
+ if not secret:
+ secret = generate_secret()
+
+ if user == 'client.admin':
+ args = ['import', '-i', dest]
+ else:
+ args = ['get-or-create', name]
+ args.extend(generate_caps(None, caps))
+ args.extend(['-o', dest])
+
+ cmd_list.append(generate_ceph_authtool_cmd(
+ cluster, name, secret, caps, dest, container_image))
+
+ if import_key or user != 'client.admin':
+ cmd_list.append(generate_ceph_cmd(
+ cluster, args, user, user_key_path, container_image))
+
+ return cmd_list
+
+
+def delete_key(cluster, user, user_key_path, name, container_image=None):
+ '''
+ Delete a CephX key
+ '''
+
+ cmd_list = []
+
+ args = [
+ 'del',
+ name,
+ ]
+
+ cmd_list.append(generate_ceph_cmd(
+ cluster, args, user, user_key_path, container_image))
+
+ return cmd_list
+
+
+def get_key(cluster, user, user_key_path, name, dest, container_image=None):
+ '''
+ Get a CephX key (write on the filesystem)
+ '''
+
+ cmd_list = []
+
+ args = [
+ 'get',
+ name,
+ '-o',
+ dest,
+ ]
+
+ cmd_list.append(generate_ceph_cmd(
+ cluster, args, user, user_key_path, container_image))
+
+ return cmd_list
+
+
+def info_key(cluster, name, user, user_key_path, output_format, container_image=None): # noqa: E501
+ '''
+ Get information about a CephX key
+ '''
+
+ cmd_list = []
+
+ args = [
+ 'get',
+ name,
+ '-f',
+ output_format,
+ ]
+
+ cmd_list.append(generate_ceph_cmd(
+ cluster, args, user, user_key_path, container_image))
+
+ return cmd_list
+
+
+def list_keys(cluster, user, user_key_path, container_image=None):
+ '''
+ List all CephX keys
+ '''
+
+ cmd_list = []
+
+ args = [
+ 'ls',
+ '-f',
+ 'json',
+ ]
+
+ cmd_list.append(generate_ceph_cmd(
+ cluster, args, user, user_key_path, container_image))
+
+ return cmd_list
+
+
+def exec_commands(module, cmd_list):
+ '''
+ Execute command(s)
+ '''
+
+ for cmd in cmd_list:
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ return rc, cmd, out, err
+
+ return rc, cmd, out, err
+
+
+def lookup_ceph_initial_entities(module, out):
+ '''
+ Lookup Ceph initial keys entries in the auth map
+ '''
+
+ # convert out to json, ansible returns a string...
+ try:
+ out_dict = json.loads(out)
+ except ValueError as e:
+ fatal("Could not decode 'ceph auth list' json output: {}".format(e), module) # noqa: E501
+
+ entities = []
+ if "auth_dump" in out_dict:
+ for key in out_dict["auth_dump"]:
+ for k, v in key.items():
+ if k == "entity":
+ if v in CEPH_INITIAL_KEYS:
+ entities.append(v)
+ else:
+ fatal("'auth_dump' key not present in json output:", module) # noqa: E501
+
+ if len(entities) != len(CEPH_INITIAL_KEYS) and not str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False)): # noqa: E501
+ # some initial keys must be missing from 'auth_dump', otherwise they
+ # would have been added to 'entities' above. Report what's missing.
+ missing = []
+ for e in CEPH_INITIAL_KEYS:
+ if e not in entities:
+ missing.append(e)
+ fatal("initial keyring does not contain keys: " + ' '.join(missing), module) # noqa: E501
+ return entities
+
+
+def build_key_path(cluster, entity):
+ '''
+ Build key path depending on the key type
+ '''
+
+ if "admin" in entity:
+ path = "/etc/ceph"
+ keyring_filename = cluster + "." + entity + ".keyring"
+ key_path = os.path.join(path, keyring_filename)
+ elif "bootstrap" in entity:
+ path = "/var/lib/ceph"
+ # bootstrap keys show up as 'client.bootstrap-osd'
+ # however the directory is called '/var/lib/ceph/bootstrap-osd'
+ # so we need to strip the 'client.' prefix
+ entity_split = entity.split('.')[1]
+ keyring_filename = cluster + ".keyring"
+ key_path = os.path.join(path, entity_split, keyring_filename)
+ else:
+ return None
+
+ return key_path
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=False),
+ state=dict(type='str', required=False, default='present', choices=['present', 'update', 'absent', # noqa: E501
+ 'list', 'info', 'fetch_initial_keys', 'generate_secret']), # noqa: E501
+ caps=dict(type='dict', required=False, default=None),
+ secret=dict(type='str', required=False, default=None, no_log=True),
+ import_key=dict(type='bool', required=False, default=True),
+ dest=dict(type='str', required=False, default='/etc/ceph/'),
+ user=dict(type='str', required=False, default='client.admin'),
+ user_key=dict(type='str', required=False, default=None),
+ output_format=dict(type='str', required=False, default='json', choices=['json', 'plain', 'xml', 'yaml']) # noqa: E501
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+
+ file_args = module.load_file_common_arguments(module.params)
+
+ # Gather module parameters in variables
+ state = module.params['state']
+ name = module.params.get('name')
+ cluster = module.params.get('cluster')
+ caps = module.params.get('caps')
+ secret = module.params.get('secret')
+ import_key = module.params.get('import_key')
+ dest = module.params.get('dest')
+ user = module.params.get('user')
+ user_key = module.params.get('user_key')
+ output_format = module.params.get('output_format')
+
+ changed = False
+
+ result = dict(
+ changed=changed,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ startd = datetime.datetime.now()
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ # Test if the key exists, if it does we skip its creation
+ # We only want to run this check when a key needs to be added
+ # There is no guarantee that any cluster is running and we don't need one
+ _secret = secret
+ _caps = caps
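+ # 'key_exist' mirrors the return code of the 'ceph auth get' probe run
+ # below when import_key is set: 0 means the key already exists in the
+ # cluster, any other value means it does not (yet).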
+ key_exist = 1
+
+ if not user_key:
+ user_key_filename = '{}.{}.keyring'.format(cluster, user)
+ user_key_dir = '/etc/ceph'
+ user_key_path = os.path.join(user_key_dir, user_key_filename)
+ else:
+ user_key_path = user_key
+
+ if (state in ["present", "update"]):
+ # if dest is not a directory, the user wants to change the file's name
+ # (e.g. /etc/ceph/ceph.mgr.ceph-mon2.keyring)
+ if not os.path.isdir(dest):
+ file_path = dest
+ else:
+ if 'bootstrap' in dest:
+ # Build a different path for bootstrap keys as they are stored
+ # as /var/lib/ceph/bootstrap-rbd/ceph.keyring
+ keyring_filename = cluster + '.keyring'
+ else:
+ keyring_filename = cluster + "." + name + ".keyring"
+ file_path = os.path.join(dest, keyring_filename)
+
+ file_args['path'] = file_path
+
+ if import_key:
+ _info_key = []
+ rc, cmd, out, err = exec_commands(
+ module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa: E501
+ key_exist = rc
+ if not caps and key_exist != 0:
+ fatal("Capabilities must be provided when state is 'present'", module) # noqa: E501
+ if key_exist != 0 and secret is None and caps is None:
+ fatal("Keyring doesn't exist, you must provide 'secret' and 'caps'", module) # noqa: E501
+ if key_exist == 0:
+ _info_key = json.loads(out)
+ if not secret:
+ secret = _info_key[0]['key']
+ _secret = _info_key[0]['key']
+ if not caps:
+ caps = _info_key[0]['caps']
+ _caps = _info_key[0]['caps']
+ if secret == _secret and caps == _caps:
+ if not os.path.isfile(file_path):
+ rc, cmd, out, err = exec_commands(module, get_key(cluster, user, user_key_path, name, file_path, container_image)) # noqa: E501
+ result["rc"] = rc
+ if rc != 0:
+ result["stdout"] = "Couldn't fetch the key {0} at {1}.".format(name, file_path) # noqa: E501
+ module.exit_json(**result)
+ result["stdout"] = "fetched the key {0} at {1}.".format(name, file_path) # noqa: E501
+
+ result["stdout"] = "{0} already exists and doesn't need to be updated.".format(name) # noqa: E501
+ result["rc"] = 0
+ module.set_fs_attributes_if_different(file_args, False)
+ module.exit_json(**result)
+ else:
+ if os.path.isfile(file_path) and (not secret or not caps):
+ result["stdout"] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(name, dest, import_key) # noqa: E501
+ result["rc"] = 0
+ module.exit_json(**result)
+ if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0: # noqa: E501
+ rc, cmd, out, err = exec_commands(module, create_key(
+ module, result, cluster, user, user_key_path, name, secret, caps, import_key, file_path, container_image)) # noqa: E501
+ if rc != 0:
+ result["stdout"] = "Couldn't create or update {0}".format(name)
+ result["stderr"] = err
+ module.exit_json(**result)
+ module.set_fs_attributes_if_different(file_args, False)
+ changed = True
+
+ elif state == "absent":
+ if key_exist == 0:
+ rc, cmd, out, err = exec_commands(
+ module, delete_key(cluster, user, user_key_path, name, container_image)) # noqa: E501
+ if rc == 0:
+ changed = True
+ else:
+ rc = 0
+
+ elif state == "info":
+ rc, cmd, out, err = exec_commands(
+ module, info_key(cluster, name, user, user_key_path, output_format, container_image)) # noqa: E501
+
+ elif state == "list":
+ rc, cmd, out, err = exec_commands(
+ module, list_keys(cluster, user, user_key_path, container_image))
+
+ elif state == "fetch_initial_keys":
+ hostname = socket.gethostname().split('.', 1)[0]
+ user = "mon."
+ keyring_filename = cluster + "-" + hostname + "/keyring"
+ user_key_path = os.path.join("/var/lib/ceph/mon/", keyring_filename)
+ rc, cmd, out, err = exec_commands(
+ module, list_keys(cluster, user, user_key_path, container_image))
+ if rc != 0:
+ result["stdout"] = "failed to retrieve ceph keys"
+ result["sdterr"] = err
+ result['rc'] = 0
+ module.exit_json(**result)
+
+ entities = lookup_ceph_initial_entities(module, out)
+
+ output_format = "plain"
+ for entity in entities:
+ key_path = build_key_path(cluster, entity)
+ if key_path is None:
+ fatal("Failed to build key path, no entity yet?", module)
+ elif os.path.isfile(key_path):
+ # if the key is already on the filesystem
+ # there is no need to fetch it again
+ continue
+
+ extra_args = [
+ '-o',
+ key_path,
+ ]
+
+ info_cmd = info_key(cluster, entity, user,
+ user_key_path, output_format, container_image)
+ # we use info_cmd[0] because info_cmd is an array made of an array
+ info_cmd[0].extend(extra_args)
+ rc, cmd, out, err = exec_commands(
+ module, info_cmd) # noqa: E501
+
+ file_args = module.load_file_common_arguments(module.params)
+ file_args['path'] = key_path
+ module.set_fs_attributes_if_different(file_args, False)
+ elif state == "generate_secret":
+ out = generate_secret().decode()
+ cmd = ''
+ rc = 0
+ err = ''
+ changed = True
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=changed,
+ )
+
+ if rc != 0:
+ module.fail_json(msg='non-zero return code', **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module, \
+ generate_ceph_cmd, \
+ is_containerized
+except ImportError:
+ from module_utils.ca_common import exit_module, \
+ generate_ceph_cmd, \
+ is_containerized
+import datetime
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_mgr_module
+short_description: Manage Ceph MGR module
+version_added: "2.8"
+description:
+ - Manage Ceph MGR module
+options:
+ name:
+ description:
+ - name of the ceph MGR module.
+ required: true
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ state:
+ description:
+ - If 'enable' is used, the module enables the MGR module.
+ If 'disable' is used, the module disables the MGR module.
+ required: false
+ choices: ['enable', 'disable']
+ default: enable
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: enable dashboard mgr module
+ ceph_mgr_module:
+ name: dashboard
+ state: enable
+
+- name: disable multiple mgr modules
+ ceph_mgr_module:
+ name: '{{ item }}'
+ state: disable
+ loop:
+ - 'dashboard'
+ - 'prometheus'
+'''
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ cluster=dict(type='str', required=False, default='ceph'),
+ state=dict(type='str', required=False, default='enable', choices=['enable', 'disable']), # noqa: E501
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params.get('name')
+ cluster = module.params.get('cluster')
+ state = module.params.get('state')
+
+ startd = datetime.datetime.now()
+
+ container_image = is_containerized()
+
+ cmd = generate_ceph_cmd(['mgr', 'module'],
+ [state, name],
+ cluster=cluster,
+ container_image=container_image)
+
+ if module.check_mode:
+ exit_module(
+ module=module,
+ out='',
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ rc, out, err = module.run_command(cmd)
+ if 'is already enabled' in err:
+ changed = False
+ else:
+ changed = True
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=changed
+ )
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module, generate_ceph_cmd, is_containerized # noqa: E501
+except ImportError:
+ from module_utils.ca_common import exit_module, generate_ceph_cmd, is_containerized # noqa: E501
+import datetime
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_osd
+short_description: Manage Ceph OSD state
+version_added: "2.8"
+description:
+ - Manage Ceph OSD state
+options:
+ ids:
+ description:
+ - The ceph OSD id(s).
+ required: true
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ state:
+ description:
+ - The ceph OSD state.
+ required: true
+ choices: ['destroy', 'down', 'in', 'out', 'purge', 'rm']
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: destroy OSD 42
+ ceph_osd:
+ ids: 42
+ state: destroy
+
+- name: set multiple OSDs down
+ ceph_osd:
+ ids: [0, 1, 3]
+ state: down
+
+- name: set OSD 42 in
+ ceph_osd:
+ ids: 42
+ state: in
+
+- name: set OSD 42 out
+ ceph_osd:
+ ids: 42
+ state: out
+
+- name: purge OSD 42
+ ceph_osd:
+ ids: 42
+ state: purge
+
+- name: rm OSD 42
+ ceph_osd:
+ ids: 42
+ state: rm
+'''
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ids=dict(type='list', required=True),
+ cluster=dict(type='str', required=False, default='ceph'),
+ state=dict(type='str', required=True, choices=['destroy', 'down', 'in', 'out', 'purge', 'rm']), # noqa: E501
+ ),
+ supports_check_mode=True,
+ )
+
+ ids = module.params.get('ids')
+ cluster = module.params.get('cluster')
+ state = module.params.get('state')
+
+ if state in ['destroy', 'purge'] and len(ids) > 1:
+ module.fail_json(msg='destroy and purge only support one OSD at a time', rc=1) # noqa: E501
+
+ startd = datetime.datetime.now()
+
+ container_image = is_containerized()
+
+ cmd = generate_ceph_cmd(['osd', state], ids, cluster=cluster, container_image=container_image) # noqa: E501
+
+ if state in ['destroy', 'purge']:
+ cmd.append('--yes-i-really-mean-it')
+
+ if module.check_mode:
+ exit_module(
+ module=module,
+ out='',
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ rc, out, err = module.run_command(cmd)
+ changed = True
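+ # ceph reports 'marked ...' on stderr when the state actually changed,
+ # so its absence is treated as "the OSD was already in that state"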
+ if state in ['down', 'in', 'out'] and 'marked' not in err:
+ changed = False
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=changed
+ )
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module, \
+ generate_ceph_cmd, \
+ is_containerized
+except ImportError:
+ from module_utils.ca_common import exit_module, \
+ generate_ceph_cmd, \
+ is_containerized
+import datetime
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_osd_flag
+short_description: Manage Ceph OSD flag
+version_added: "2.8"
+description:
+ - Manage Ceph OSD flag
+options:
+ name:
+ description:
+ - name of the ceph OSD flag.
+ required: true
+ choices: ['noup', 'nodown', 'noout', 'nobackfill', 'norebalance',
+ 'norecover', 'noscrub', 'nodeep-scrub']
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ state:
+ description:
+ - If 'present' is used, the module sets the OSD flag.
+ If 'absent' is used, the module will unset the OSD flag.
+ required: false
+ choices: ['present', 'absent']
+ default: present
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: set noup OSD flag
+ ceph_osd_flag:
+ name: noup
+
+- name: unset multiple OSD flags
+ ceph_osd_flag:
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - 'noup'
+ - 'norebalance'
+'''
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, choices=['noup', 'nodown', 'noout', 'nobackfill', 'norebalance', 'norecover', 'noscrub', 'nodeep-scrub']), # noqa: E501
+ cluster=dict(type='str', required=False, default='ceph'),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']), # noqa: E501
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params.get('name')
+ cluster = module.params.get('cluster')
+ state = module.params.get('state')
+
+ startd = datetime.datetime.now()
+
+ container_image = is_containerized()
+
+ if state == 'present':
+ cmd = generate_ceph_cmd(['osd', 'set'], [name], cluster=cluster, container_image=container_image) # noqa: E501
+ else:
+ cmd = generate_ceph_cmd(['osd', 'unset'], [name], cluster=cluster, container_image=container_image) # noqa: E501
+
+ if module.check_mode:
+ exit_module(
+ module=module,
+ out='',
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ rc, out, err = module.run_command(cmd)
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+#!/usr/bin/python3
+
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import generate_ceph_cmd, \
+ pre_generate_ceph_cmd, \
+ is_containerized, \
+ exec_command, \
+ exit_module
+except ImportError:
+ from module_utils.ca_common import generate_ceph_cmd, \
+ pre_generate_ceph_cmd, \
+ is_containerized, \
+ exec_command, \
+ exit_module
+
+
+import datetime
+import json
+import os
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_pool
+
+author: Guillaume Abrioux <gabrioux@redhat.com>
+
+short_description: Manage Ceph Pools
+
+version_added: "2.8"
+
+description:
+ - Manage Ceph pool(s) creation, deletion and updates.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the Ceph pool
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a pool if it doesn't exist
+ or updates it if it already exists.
+ If 'absent' is used, the module will simply delete the pool.
+ If 'list' is used, the module will return all details about the
+ existing pools (json formatted).
+ required: false
+ choices: ['present', 'absent', 'list']
+ default: present
+ size:
+ description:
+ - set the replica size of the pool.
+ required: false
+ default: 3
+ min_size:
+ description:
+ - set the min_size parameter of the pool.
+ required: false
+ default: `osd_pool_default_min_size` (from the ceph configuration)
+ pg_num:
+ description:
+ - set the pg_num of the pool.
+ required: false
+ default: `osd_pool_default_pg_num` (from the ceph configuration)
+ pgp_num:
+ description:
+ - set the pgp_num of the pool.
+ required: false
+ default: `osd_pool_default_pgp_num` (from the ceph configuration)
+ pg_autoscale_mode:
+ description:
+ - set the pg autoscaler on the pool.
+ required: false
+ default: 'on'
+ target_size_ratio:
+ description:
+ - set the target_size_ratio on the pool
+ required: false
+ default: None
+ pool_type:
+ description:
+ - set the pool type, either 'replicated' or 'erasure'
+ required: false
+ default: 'replicated'
+ erasure_profile:
+ description:
+ - When pool_type = 'erasure', set the erasure profile of the pool
+ required: false
+ default: 'default'
+ rule_name:
+ description:
+ - Set the crush rule name assigned to the pool
+ required: false
+ default: 'replicated_rule' when pool_type is 'replicated', else None
+ expected_num_objects:
+ description:
+ - Set the expected_num_objects parameter of the pool.
+ required: false
+ default: '0'
+ application:
+ description:
+ - Set the pool application on the pool.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+
+pools:
+ - { name: foo, size: 3, application: rbd, pool_type: 'replicated',
+ pg_autoscale_mode: 'on' }
+
+- hosts: all
+ become: true
+ tasks:
+ - name: create a pool
+ ceph_pool:
+ name: "{{ item.name }}"
+ state: present
+ size: "{{ item.size }}"
+ application: "{{ item.application }}"
+ pool_type: "{{ item.pool_type }}"
+ pg_autoscale_mode: "{{ item.pg_autoscale_mode }}"
+ with_items: "{{ pools }}"
+'''
+
+RETURN = '''# '''
+
+
+def check_pool_exist(cluster,
+ name,
+ user,
+ user_key,
+ output_format='json',
+ container_image=None):
+ '''
+ Check if a given pool exists
+ '''
+
+ args = ['stats', name, '-f', output_format]
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ return cmd
+
+
+def generate_get_config_cmd(param,
+ cluster,
+ user,
+ user_key,
+ container_image=None):
+ _cmd = pre_generate_ceph_cmd(container_image=container_image)
+ args = [
+ '-n',
+ user,
+ '-k',
+ user_key,
+ '--cluster',
+ cluster,
+ 'config',
+ 'get',
+ 'mon.*',
+ param
+ ]
+ cmd = _cmd + args
+ return cmd
+
+
+def get_application_pool(cluster,
+ name,
+ user,
+ user_key,
+ output_format='json',
+ container_image=None):
+ '''
+ Get application type enabled on a given pool
+ '''
+
+ args = ['application', 'get', name, '-f', output_format]
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ return cmd
+
+
+def enable_application_pool(cluster,
+ name,
+ application,
+ user,
+ user_key,
+ container_image=None):
+ '''
+ Enable application on a given pool
+ '''
+
+ args = ['application', 'enable', name, application]
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ return cmd
+
+
+def disable_application_pool(cluster,
+ name,
+ application,
+ user,
+ user_key,
+ container_image=None):
+ '''
+ Disable application on a given pool
+ '''
+
+ args = ['application', 'disable', name,
+ application, '--yes-i-really-mean-it']
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_pool_details(module,
+ cluster,
+ name,
+ user,
+ user_key,
+ output_format='json',
+ container_image=None):
+ '''
+ Get details about a given pool
+ '''
+
+ args = ['ls', 'detail', '-f', output_format]
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ rc, cmd, out, err = exec_command(module, cmd)
+
+ if rc == 0:
+ out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0]
+
+ _rc, _cmd, application_pool, _err = exec_command(module,
+ get_application_pool(cluster, # noqa: E501
+ name, # noqa: E501
+ user, # noqa: E501
+ user_key, # noqa: E501
+ container_image=container_image)) # noqa: E501
+
+ # This is a trick because "target_size_ratio" isn't present at the same
+ # level in the dict
+ # ie:
+ # {
+ # 'pg_num': 8,
+ # 'pgp_num': 8,
+ # 'pg_autoscale_mode': 'on',
+ # 'options': {
+ # 'target_size_ratio': 0.1
+ # }
+ # }
+ # If 'target_size_ratio' is present in 'options', we set it, this way we
+ # end up with a dict containing all needed keys at the same level.
+ if 'target_size_ratio' in out['options'].keys():
+ out['target_size_ratio'] = out['options']['target_size_ratio']
+ else:
+ out['target_size_ratio'] = None
+
+ application = list(json.loads(application_pool.strip()).keys())
+
+ if len(application) == 0:
+ out['application'] = ''
+ else:
+ out['application'] = application[0]
+
+ return rc, cmd, out, err
+
+
+def compare_pool_config(user_pool_config, running_pool_details):
+ '''
+ Compare user input config pool details with current running pool details
+ '''
+
+ delta = {}
+ filter_keys = ['pg_num', 'pg_placement_num', 'size',
+ 'pg_autoscale_mode', 'target_size_ratio']
+ for key in filter_keys:
+ if (str(running_pool_details[key]) != user_pool_config[key]['value'] and # noqa: E501
+ user_pool_config[key]['value']):
+ delta[key] = user_pool_config[key]
+
+ if (running_pool_details['application'] !=
+ user_pool_config['application']['value'] and
+ user_pool_config['application']['value']):
+ delta['application'] = {}
+ delta['application']['new_application'] = user_pool_config['application']['value'] # noqa: E501
+ # to be improved (for update_pools()...)
+ delta['application']['value'] = delta['application']['new_application']
+ delta['application']['old_application'] = running_pool_details['application'] # noqa: E501
+
+ return delta
+
+
+def list_pools(cluster,
+ user,
+ user_key,
+ details,
+ output_format='json',
+ container_image=None):
+ '''
+ List existing pools
+ '''
+
+ args = ['ls']
+
+ if details:
+ args.append('detail')
+
+ args.extend(['-f', output_format])
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ return cmd
+
+
+def create_pool(cluster,
+ name,
+ user,
+ user_key,
+ user_pool_config,
+ container_image=None):
+ '''
+ Create a new pool
+ '''
+
+ args = ['create', user_pool_config['pool_name']['value'],
+ user_pool_config['type']['value']]
+
+ if user_pool_config['pg_autoscale_mode']['value'] == 'off':
+ args.extend(['--pg_num',
+ user_pool_config['pg_num']['value'],
+ '--pgp_num',
+ user_pool_config['pgp_num']['value'] or
+ user_pool_config['pg_num']['value']])
+ elif user_pool_config['target_size_ratio']['value']:
+ args.extend(['--target_size_ratio',
+ user_pool_config['target_size_ratio']['value']])
+
+ if user_pool_config['type']['value'] == 'replicated':
+ args.extend([user_pool_config['crush_rule']['value'],
+ '--expected_num_objects',
+ user_pool_config['expected_num_objects']['value'],
+ '--autoscale-mode',
+ user_pool_config['pg_autoscale_mode']['value']])
+
+ if (user_pool_config['size']['value'] and
+ user_pool_config['type']['value'] == "replicated"):
+ args.extend(['--size', user_pool_config['size']['value']])
+
+ elif user_pool_config['type']['value'] == 'erasure':
+ args.extend([user_pool_config['erasure_profile']['value']])
+
+ if user_pool_config['crush_rule']['value']:
+ args.extend([user_pool_config['crush_rule']['value']])
+
+ args.extend(['--expected_num_objects',
+ user_pool_config['expected_num_objects']['value'],
+ '--autoscale-mode',
+ user_pool_config['pg_autoscale_mode']['value']])
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ return cmd
+
+
+def remove_pool(cluster, name, user, user_key, container_image=None):
+ '''
+ Remove a pool
+ '''
+
+ args = ['rm', name, name, '--yes-i-really-really-mean-it']
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ return cmd
+
+
+def update_pool(module, cluster, name,
+ user, user_key, delta, container_image=None):
+ '''
+ Update an existing pool
+ '''
+
+ report = ""
+
+ for key in delta.keys():
+ if key != 'application':
+ args = ['set',
+ name,
+ delta[key]['cli_set_opt'],
+ delta[key]['value']]
+
+ cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
+ args=args,
+ cluster=cluster,
+ user=user,
+ user_key=user_key,
+ container_image=container_image)
+
+ rc, cmd, out, err = exec_command(module, cmd)
+ if rc != 0:
+ return rc, cmd, out, err
+
+ else:
+ rc, cmd, out, err = exec_command(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501
+ if rc != 0:
+ return rc, cmd, out, err
+
+ rc, cmd, out, err = exec_command(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501
+ if rc != 0:
+ return rc, cmd, out, err
+
+ report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value']) # noqa: E501
+
+ out = report
+ return rc, cmd, out, err
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent', 'list']),
+ details=dict(type='bool', required=False, default=False),
+ size=dict(type='str', required=False),
+ min_size=dict(type='str', required=False),
+ pg_num=dict(type='str', required=False),
+ pgp_num=dict(type='str', required=False),
+ pg_autoscale_mode=dict(type='str', required=False, default='on'),
+ target_size_ratio=dict(type='str', required=False, default=None),
+ pool_type=dict(type='str', required=False, default='replicated',
+ choices=['replicated', 'erasure', '1', '3']),
+ erasure_profile=dict(type='str', required=False, default='default'),
+ rule_name=dict(type='str', required=False, default=None),
+ expected_num_objects=dict(type='str', required=False, default="0"),
+ application=dict(type='str', required=False, default=None),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ # Gather module parameters in variables
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ state = module.params.get('state')
+ details = module.params.get('details')
+ size = module.params.get('size')
+ min_size = module.params.get('min_size')
+ pg_num = module.params.get('pg_num')
+ pgp_num = module.params.get('pgp_num')
+ pg_autoscale_mode = module.params.get('pg_autoscale_mode')
+ target_size_ratio = module.params.get('target_size_ratio')
+ application = module.params.get('application')
+
+ if (module.params.get('pg_autoscale_mode').lower() in
+ ['true', 'on', 'yes']):
+ pg_autoscale_mode = 'on'
+ elif (module.params.get('pg_autoscale_mode').lower() in
+ ['false', 'off', 'no']):
+ pg_autoscale_mode = 'off'
+ else:
+ pg_autoscale_mode = 'warn'
+
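+ # numeric pool_type values map to Ceph's internal pool type ids
+ # (1 = replicated, 3 = erasure)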
+ if module.params.get('pool_type') == '1':
+ pool_type = 'replicated'
+ elif module.params.get('pool_type') == '3':
+ pool_type = 'erasure'
+ else:
+ pool_type = module.params.get('pool_type')
+
+ if not module.params.get('rule_name'):
+ rule_name = 'replicated_rule' if pool_type == 'replicated' else None
+ else:
+ rule_name = module.params.get('rule_name')
+
+ erasure_profile = module.params.get('erasure_profile')
+ expected_num_objects = module.params.get('expected_num_objects')
+ user_pool_config = {
+ 'pool_name': {'value': name},
+ 'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'},
+ 'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'},
+ 'pg_autoscale_mode': {'value': pg_autoscale_mode,
+ 'cli_set_opt': 'pg_autoscale_mode'},
+ 'target_size_ratio': {'value': target_size_ratio,
+ 'cli_set_opt': 'target_size_ratio'},
+ 'application': {'value': application},
+ 'type': {'value': pool_type},
+ 'erasure_profile': {'value': erasure_profile},
+ 'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'},
+ 'expected_num_objects': {'value': expected_num_objects},
+ 'size': {'value': size, 'cli_set_opt': 'size'},
+ 'min_size': {'value': min_size}
+ }
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ user = "client.admin"
+ keyring_filename = cluster + '.' + user + '.keyring'
+ user_key = os.path.join("/etc/ceph/", keyring_filename)
+
+ if state == "present":
+ rc, cmd, out, err = exec_command(module,
+ check_pool_exist(cluster,
+ name,
+ user,
+ user_key,
+ container_image=container_image)) # noqa: E501
+ if rc == 0:
+ running_pool_details = get_pool_details(module,
+ cluster,
+ name,
+ user,
+ user_key,
+ container_image=container_image) # noqa: E501
+ user_pool_config['pg_placement_num'] = {'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num'} # noqa: E501
+ delta = compare_pool_config(user_pool_config,
+ running_pool_details[2])
+ if len(delta) > 0:
+ keys = list(delta.keys())
+ details = running_pool_details[2]
+ if details['erasure_code_profile'] and 'size' in keys:
+ del delta['size']
+ if details['pg_autoscale_mode'] == 'on':
+ delta.pop('pg_num', None)
+ delta.pop('pgp_num', None)
+
+ if len(delta) == 0:
+ out = "Skipping pool {}.\nUpdating either 'size' on an erasure-coded pool or 'pg_num'/'pgp_num' on a pg autoscaled pool is incompatible".format(name) # noqa: E501
+ else:
+ rc, cmd, out, err = update_pool(module,
+ cluster,
+ name,
+ user,
+ user_key,
+ delta,
+ container_image=container_image) # noqa: E501
+ if rc == 0:
+ changed = True
+ else:
+ out = "Pool {} already exists and there is nothing to update.".format(name) # noqa: E501
+ else:
+ rc, cmd, out, err = exec_command(module,
+ create_pool(cluster,
+ name,
+ user,
+ user_key,
+ user_pool_config=user_pool_config, # noqa: E501
+ container_image=container_image)) # noqa: E501
+ if user_pool_config['application']['value']:
+ rc, _, _, _ = exec_command(module,
+ enable_application_pool(cluster,
+ name,
+ user_pool_config['application']['value'], # noqa: E501
+ user,
+ user_key,
+ container_image=container_image)) # noqa: E501
+ if user_pool_config['min_size']['value']:
+ # not implemented yet
+ pass
+ changed = True
+
+ elif state == "list":
+ rc, cmd, out, err = exec_command(module,
+ list_pools(cluster,
+ user,
+ user_key,
+ details,
+ container_image=container_image)) # noqa: E501
+ if rc != 0:
+ out = "Couldn't list pool(s) present on the cluster"
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_command(module,
+ check_pool_exist(cluster,
+ name, user,
+ user_key,
+ container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_command(module,
+ remove_pool(cluster,
+ name,
+ user,
+ user_key,
+ container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc = 0
+ out = "Skipped, since pool {} doesn't exist".format(name)
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd,
+ changed=changed)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+#!/usr/bin/python
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exec_command, \
+ is_containerized, \
+ fatal
+except ImportError:
+ from module_utils.ca_common import exec_command, \
+ is_containerized, \
+ fatal
+import datetime
+import copy
+import json
+import os
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.0',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_volume
+
+short_description: Create ceph OSDs with ceph-volume
+
+description:
+ - Using the ceph-volume utility available in Ceph this module
+ can be used to create ceph OSDs that are backed by logical volumes.
+ - Only available in ceph versions luminous or greater.
+
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ objectstore:
+ description:
+ - The objectstore of the OSD, either filestore or bluestore
+ - Required if action is 'create'
+ required: false
+ choices: ['bluestore', 'filestore']
+ default: bluestore
+ action:
+ description:
+ - The action to take: create OSDs, zap or query devices.
+ required: true
+ choices: ['create', 'zap', 'batch', 'prepare', 'activate', 'list', 'inventory']
+ default: create
+ data:
+ description:
+ - The logical volume name or device to use for the OSD data.
+ required: true
+ data_vg:
+ description:
+ - If data is a lv, this must be the name of the volume group it belongs to.
+ required: false
+ osd_fsid:
+ description:
+ - The OSD FSID
+ required: false
+ osd_id:
+ description:
+ - The OSD ID
+ required: false
+ journal:
+ description:
+ - The logical volume name or partition to use as a filestore journal.
+ - Only applicable if objectstore is 'filestore'.
+ required: false
+ journal_vg:
+ description:
+ - If journal is a lv, this must be the name of the volume group it belongs to.
+ - Only applicable if objectstore is 'filestore'.
+ required: false
+ db:
+ description:
+ - A partition or logical volume name to use for block.db.
+ - Only applicable if objectstore is 'bluestore'.
+ required: false
+ db_vg:
+ description:
+ - If db is a lv, this must be the name of the volume group it belongs to. # noqa: E501
+ - Only applicable if objectstore is 'bluestore'.
+ required: false
+ wal:
+ description:
+ - A partition or logical volume name to use for block.wal.
+ - Only applicable if objectstore is 'bluestore'.
+ required: false
+ wal_vg:
+ description:
+ - If wal is a lv, this must be the name of the volume group it belongs to. # noqa: E501
+ - Only applicable if objectstore is 'bluestore'.
+ required: false
+ crush_device_class:
+ description:
+ - Will set the crush device class for the OSD.
+ required: false
+ dmcrypt:
+ description:
+ - If set to True the OSD will be encrypted with dmcrypt.
+ required: false
+ batch_devices:
+ description:
+ - A list of devices to pass to the 'ceph-volume lvm batch' subcommand.
+ - Only applicable if action is 'batch'.
+ required: false
+ osds_per_device:
+ description:
+ - The number of OSDs to create per device.
+ - Only applicable if action is 'batch'.
+ required: false
+ default: 1
+ journal_size:
+ description:
+ - The size in MB of filestore journals.
+ - Only applicable if action is 'batch'.
+ required: false
+ default: 5120
+ block_db_size:
+ description:
+ - The size in bytes of bluestore block db lvs.
+ - The default of -1 means to create them as big as possible.
+ - Only applicable if action is 'batch'.
+ required: false
+ default: -1
+ journal_devices:
+ description:
+ - A list of devices for filestore journal to pass to the 'ceph-volume lvm batch' subcommand.
+ - Only applicable if action is 'batch'.
+ - Only applicable if objectstore is 'filestore'.
+ required: false
+ block_db_devices:
+ description:
+ - A list of devices for bluestore block db to pass to the 'ceph-volume lvm batch' subcommand.
+ - Only applicable if action is 'batch'.
+ - Only applicable if objectstore is 'bluestore'.
+ required: false
+ wal_devices:
+ description:
+ - A list of devices for bluestore block wal to pass to the 'ceph-volume lvm batch' subcommand.
+ - Only applicable if action is 'batch'.
+ - Only applicable if objectstore is 'bluestore'.
+ required: false
+ report:
+ description:
+ - If provided the --report flag will be passed to 'ceph-volume lvm batch'.
+ - No OSDs will be created.
+ - Results will be returned in json format.
+ - Only applicable if action is 'batch'.
+ required: false
+ list:
+ description:
+ - List potential Ceph LVM metadata on a device
+ required: false
+ inventory:
+ description:
+ - List storage device inventory.
+ required: false
+
+author:
+ - Andrew Schoen (@andrewschoen)
+ - Sebastien Han <seb@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: set up a filestore osd with an lv data and a journal partition
+ ceph_volume:
+ objectstore: filestore
+ data: data-lv
+ data_vg: data-vg
+ journal: /dev/sdc1
+ action: create
+
+- name: set up a bluestore osd with a raw device for data
+ ceph_volume:
+ objectstore: bluestore
+ data: /dev/sdc
+ action: create
+
+
+- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa: E501
+ ceph_volume:
+ objectstore: bluestore
+ data: data-lv
+ data_vg: data-vg
+ db: /dev/sdc1
+ wal: /dev/sdc2
+ action: create
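+
+# Additional sketches based on the batch/zap options documented above
+# (device paths are placeholders):
+- name: batch prepare bluestore osds on several devices
+ ceph_volume:
+ objectstore: bluestore
+ batch_devices:
+ - /dev/sdb
+ - /dev/sdc
+ action: batch
+
+- name: zap a device so it can be reused
+ ceph_volume:
+ data: /dev/sdb
+ action: zap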
+'''
+
+
+def container_exec(binary, container_image, mounts=None):
+ '''
+ Build the docker CLI to run a command inside a container
+ '''
+ _mounts = {}
+ _mounts['/run/lock/lvm'] = '/run/lock/lvm:z'
+ _mounts['/var/run/udev'] = '/var/run/udev:z'
+ _mounts['/dev'] = '/dev'
+ _mounts['/etc/ceph'] = '/etc/ceph:z'
+ _mounts['/run/lvm'] = '/run/lvm'
+ _mounts['/var/lib/ceph'] = '/var/lib/ceph:z'
+ _mounts['/var/log/ceph'] = '/var/log/ceph:z'
+ if mounts is None:
+ mounts = _mounts
+ else:
+ _mounts.update(mounts)
+
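+ # flatten the mount map into the repeated '-v <src>:<dst>' arguments
+ # expected by the container runtime CLI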
+ volumes = sum(
+ [['-v', '{}:{}'.format(src_dir, dst_dir)]
+ for src_dir, dst_dir in _mounts.items()], [])
+
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ command_exec = [container_binary, 'run',
+ '--rm',
+ '--privileged',
+ '--net=host',
+ '--ipc=host'] + volumes + \
+ ['--entrypoint=' + binary, container_image]
+ return command_exec
+
+
+def build_cmd(action, container_image,
+ cluster='ceph',
+ binary='ceph-volume', mounts=None):
+ '''
+ Build the ceph-volume command
+ '''
+
+ _binary = binary
+
+ if container_image:
+ cmd = container_exec(
+ binary, container_image, mounts=mounts)
+ else:
+ binary = [binary]
+ cmd = binary
+
+ if _binary == 'ceph-volume':
+ cmd.extend(['--cluster', cluster])
+
+ cmd.extend(action)
+
+ return cmd
+
+
+def get_data(data, data_vg):
+ if data_vg:
+ data = '{0}/{1}'.format(data_vg, data)
+ return data
+
+
+def get_journal(journal, journal_vg):
+ if journal_vg:
+ journal = '{0}/{1}'.format(journal_vg, journal)
+ return journal
+
+
+def get_db(db, db_vg):
+ if db_vg:
+ db = '{0}/{1}'.format(db_vg, db)
+ return db
+
+
+def get_wal(wal, wal_vg):
+ if wal_vg:
+ wal = '{0}/{1}'.format(wal_vg, wal)
+ return wal
+
+
+def batch(module, container_image, report=None):
+ '''
+ Batch prepare OSD devices
+ '''
+
+ # get module variables
+ cluster = module.params['cluster']
+ objectstore = module.params['objectstore']
+ batch_devices = module.params.get('batch_devices', None)
+ crush_device_class = module.params.get('crush_device_class', None)
+ journal_devices = module.params.get('journal_devices', None)
+ journal_size = module.params.get('journal_size', None)
+ block_db_size = module.params.get('block_db_size', None)
+ block_db_devices = module.params.get('block_db_devices', None)
+ wal_devices = module.params.get('wal_devices', None)
+ dmcrypt = module.params.get('dmcrypt', None)
+ osds_per_device = module.params.get('osds_per_device', 1)
+
+ if not osds_per_device:
+ fatal('osds_per_device must be provided if action is "batch"', module)
+
+ if osds_per_device < 1:
+ fatal('osds_per_device must be greater than 0 if action is "batch"', module) # noqa: E501
+
+ if not batch_devices:
+ fatal('batch_devices must be provided if action is "batch"', module)
+
+ # Build the CLI
+ action = ['lvm', 'batch']
+ cmd = build_cmd(action, container_image, cluster)
+ cmd.extend(['--%s' % objectstore])
+ if not report:
+ cmd.append('--yes')
+
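+ # in containerized deployments only prepare the OSDs here; activation
+ # is handled separately (the 'activate' action is bare-metal only)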
+ if container_image:
+ cmd.append('--prepare')
+
+ if crush_device_class:
+ cmd.extend(['--crush-device-class', crush_device_class])
+
+ if dmcrypt:
+ cmd.append('--dmcrypt')
+
+ if osds_per_device > 1:
+ cmd.extend(['--osds-per-device', str(osds_per_device)])
+
+ if objectstore == 'filestore':
+ cmd.extend(['--journal-size', journal_size])
+
+ if objectstore == 'bluestore' and block_db_size != '-1':
+ cmd.extend(['--block-db-size', block_db_size])
+
+ cmd.extend(batch_devices)
+
+ if journal_devices and objectstore == 'filestore':
+ cmd.append('--journal-devices')
+ cmd.extend(journal_devices)
+
+ if block_db_devices and objectstore == 'bluestore':
+ cmd.append('--db-devices')
+ cmd.extend(block_db_devices)
+
+ if wal_devices and objectstore == 'bluestore':
+ cmd.append('--wal-devices')
+ cmd.extend(wal_devices)
+
+ return cmd
+
+
+def ceph_volume_cmd(subcommand, container_image, cluster=None):
+ '''
+ Build ceph-volume initial command
+ '''
+
+ if container_image:
+ binary = 'ceph-volume'
+ cmd = container_exec(
+ binary, container_image)
+ else:
+ binary = ['ceph-volume']
+ cmd = binary
+
+ if cluster:
+ cmd.extend(['--cluster', cluster])
+
+ cmd.append('lvm')
+ cmd.append(subcommand)
+
+ return cmd
+
+
+def prepare_or_create_osd(module, action, container_image):
+ '''
+ Prepare or create OSD devices
+ '''
+
+ # get module variables
+ cluster = module.params['cluster']
+ objectstore = module.params['objectstore']
+ data = module.params['data']
+ data_vg = module.params.get('data_vg', None)
+ data = get_data(data, data_vg)
+ journal = module.params.get('journal', None)
+ journal_vg = module.params.get('journal_vg', None)
+ db = module.params.get('db', None)
+ db_vg = module.params.get('db_vg', None)
+ wal = module.params.get('wal', None)
+ wal_vg = module.params.get('wal_vg', None)
+ crush_device_class = module.params.get('crush_device_class', None)
+ dmcrypt = module.params.get('dmcrypt', None)
+
+ # Build the CLI
+ action = ['lvm', action]
+ cmd = build_cmd(action, container_image, cluster)
+ cmd.extend(['--%s' % objectstore])
+ cmd.append('--data')
+ cmd.append(data)
+
+ if journal and objectstore == 'filestore':
+ journal = get_journal(journal, journal_vg)
+ cmd.extend(['--journal', journal])
+
+ if db and objectstore == 'bluestore':
+ db = get_db(db, db_vg)
+ cmd.extend(['--block.db', db])
+
+ if wal and objectstore == 'bluestore':
+ wal = get_wal(wal, wal_vg)
+ cmd.extend(['--block.wal', wal])
+
+ if crush_device_class:
+ cmd.extend(['--crush-device-class', crush_device_class])
+
+ if dmcrypt:
+ cmd.append('--dmcrypt')
+
+ return cmd
+
+
+def list_osd(module, container_image):
+ '''
+ List will detect whether or not a device has Ceph LVM Metadata
+ '''
+
+ # get module variables
+ cluster = module.params['cluster']
+ data = module.params.get('data', None)
+ data_vg = module.params.get('data_vg', None)
+ data = get_data(data, data_vg)
+
+ # Build the CLI
+ action = ['lvm', 'list']
+ cmd = build_cmd(action,
+ container_image,
+ cluster,
+ mounts={'/var/lib/ceph': '/var/lib/ceph:ro'})
+ if data:
+ cmd.append(data)
+ cmd.append('--format=json')
+
+ return cmd
+
+
+def list_storage_inventory(module, container_image):
+ '''
+ List storage inventory.
+ '''
+
+ action = ['inventory']
+ cmd = build_cmd(action, container_image)
+ cmd.append('--format=json')
+
+ return cmd
+
+
+def activate_osd():
+ '''
+ Activate all the OSDs on a machine
+ '''
+
+ # build the CLI
+ action = ['lvm', 'activate']
+ container_image = None
+ cmd = build_cmd(action, container_image)
+ cmd.append('--all')
+
+ return cmd
+
+
+def is_lv(module, vg, lv, container_image):
+ '''
+ Check if an LV exists
+ '''
+
+ args = ['--noheadings', '--reportformat', 'json', '--select', 'lv_name={},vg_name={}'.format(lv, vg)] # noqa: E501
+
+ cmd = build_cmd(args, container_image, binary='lvs')
+
+ rc, cmd, out, err = exec_command(module, cmd)
+
+ if rc == 0:
+ result = json.loads(out)['report'][0]['lv']
+ if len(result) > 0:
+ return True
+
+ return False
+
+
+def zap_devices(module, container_image):
+ '''
+ Will run 'ceph-volume lvm zap' on all devices, lvs and partitions
+ used to create the OSD. The --destroy flag is always passed so that
+ if an OSD was originally created with a raw device or partition for
+ 'data' then any lvs that were created by ceph-volume are removed.
+ '''
+
+ # get module variables
+ data = module.params.get('data', None)
+ data_vg = module.params.get('data_vg', None)
+ journal = module.params.get('journal', None)
+ journal_vg = module.params.get('journal_vg', None)
+ db = module.params.get('db', None)
+ db_vg = module.params.get('db_vg', None)
+ wal = module.params.get('wal', None)
+ wal_vg = module.params.get('wal_vg', None)
+ osd_fsid = module.params.get('osd_fsid', None)
+ osd_id = module.params.get('osd_id', None)
+ destroy = module.params.get('destroy', True)
+
+ # build the CLI
+ action = ['lvm', 'zap']
+ cmd = build_cmd(action, container_image)
+ if destroy:
+ cmd.append('--destroy')
+
+ if osd_fsid:
+ cmd.extend(['--osd-fsid', osd_fsid])
+
+ if osd_id:
+ cmd.extend(['--osd-id', osd_id])
+
+ if data:
+ data = get_data(data, data_vg)
+ cmd.append(data)
+
+ if journal:
+ journal = get_journal(journal, journal_vg)
+ cmd.extend([journal])
+
+ if db:
+ db = get_db(db, db_vg)
+ cmd.extend([db])
+
+ if wal:
+ wal = get_wal(wal, wal_vg)
+ cmd.extend([wal])
+
+ return cmd
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ objectstore=dict(type='str', required=False, choices=[
+ 'bluestore', 'filestore'], default='bluestore'),
+ action=dict(type='str', required=False, choices=[
+ 'create', 'zap', 'batch', 'prepare', 'activate', 'list',
+ 'inventory'], default='create'), # noqa: 4502
+ data=dict(type='str', required=False),
+ data_vg=dict(type='str', required=False),
+ journal=dict(type='str', required=False),
+ journal_vg=dict(type='str', required=False),
+ db=dict(type='str', required=False),
+ db_vg=dict(type='str', required=False),
+ wal=dict(type='str', required=False),
+ wal_vg=dict(type='str', required=False),
+ crush_device_class=dict(type='str', required=False),
+ dmcrypt=dict(type='bool', required=False, default=False),
+ batch_devices=dict(type='list', required=False, default=[]),
+ osds_per_device=dict(type='int', required=False, default=1),
+ journal_size=dict(type='str', required=False, default='5120'),
+ journal_devices=dict(type='list', required=False, default=[]),
+ block_db_size=dict(type='str', required=False, default='-1'),
+ block_db_devices=dict(type='list', required=False, default=[]),
+ wal_devices=dict(type='list', required=False, default=[]),
+ report=dict(type='bool', required=False, default=False),
+ osd_fsid=dict(type='str', required=False),
+ osd_id=dict(type='str', required=False),
+ destroy=dict(type='bool', required=False, default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('data', 'osd_fsid', 'osd_id'),
+ ],
+ required_if=[
+ ('action', 'zap', ('data', 'osd_fsid', 'osd_id'), True)
+ ]
+ )
+
+ result = dict(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # start execution
+ startd = datetime.datetime.now()
+
+ # get the desired action
+ action = module.params['action']
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ # Assume the task's status will be 'changed'
+ changed = True
+
+ if action == 'create' or action == 'prepare':
+ # First test if the device has Ceph LVM Metadata
+ rc, cmd, out, err = exec_command(
+ module, list_osd(module, container_image))
+
+ # list_osd returns a dict; if the dict is empty this means
+ # we cannot check the return code since it's not consistent
+ # with the plain output
+ # see: http://tracker.ceph.com/issues/36329
+ # FIXME: it's probably less confusing to check for rc
+
+ # convert out to json, ansible returns a string...
+ try:
+ out_dict = json.loads(out)
+ except ValueError:
+ fatal("Could not decode json output: {} from the command {}".format(out, cmd), module) # noqa: E501
+
+ if out_dict:
+ data = module.params['data']
+ result['stdout'] = 'skipped, since {0} is already used for an osd'.format(data) # noqa: E501
+ result['rc'] = 0
+ module.exit_json(**result)
+
+ # Prepare or create the OSD
+ rc, cmd, out, err = exec_command(
+ module, prepare_or_create_osd(module, action, container_image))
+
+ elif action == 'activate':
+ if container_image:
+ fatal(
+ "This is not how container's activation happens, nothing to activate", module) # noqa: E501
+
+ # Activate the OSD
+ rc, cmd, out, err = exec_command(
+ module, activate_osd())
+
+ elif action == 'zap':
+ # Zap the OSD
+ skip = []
+ for device_type in ['journal', 'data', 'db', 'wal']:
+ # 1/ if we passed vg/lv
+ if module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501
+ # 2/ check this is an actual lv/vg
+ ret = is_lv(module, module.params['{}_vg'.format(device_type)], module.params[device_type], container_image) # noqa: E501
+ skip.append(ret)
+ # 3/ This isn't an lv/vg device
+ if not ret:
+ module.params['{}_vg'.format(device_type)] = False
+ module.params[device_type] = False
+ # 4/ no journal_vg|data_vg|db_vg|wal_vg was passed, so it must be a raw device # noqa: E501
+ elif not module.params.get('{}_vg'.format(device_type), None) and module.params.get(device_type, None): # noqa: E501
+ skip.append(True)
+
+ cmd = zap_devices(module, container_image)
+
+ if any(skip) or module.params.get('osd_fsid', None) \
+ or module.params.get('osd_id', None):
+ rc, cmd, out, err = exec_command(
+ module, cmd)
+ for scan_cmd in ['vgscan', 'lvscan']:
+ module.run_command([scan_cmd, '--cache'])
+ else:
+ out = 'Skipped, nothing to zap'
+ err = ''
+ changed = False
+ rc = 0
+
+ elif action == 'list':
+ # List Ceph LVM Metadata on a device
+ rc, cmd, out, err = exec_command(
+ module, list_osd(module, container_image))
+
+ elif action == 'inventory':
+ # List storage device inventory.
+ rc, cmd, out, err = exec_command(
+ module, list_storage_inventory(module, container_image))
+
+ elif action == 'batch':
+ # Batch prepare AND activate OSDs
+ report = module.params.get('report', None)
+
+ # Add --report flag for the idempotency test
+ report_flags = [
+ '--report',
+ '--format=json',
+ ]
+
+ cmd = batch(module, container_image, report=True)
+ batch_report_cmd = copy.copy(cmd)
+ batch_report_cmd.extend(report_flags)
+
+ # Run batch --report to see what's going to happen
+ # Do not run the batch command if there is nothing to do
+ rc, cmd, out, err = exec_command(
+ module, batch_report_cmd)
+ try:
+ if not out:
+ out = '{}'
+ report_result = json.loads(out)
+ except ValueError:
+ strategy_changed_in_out = "strategy changed" in out
+ strategy_changed_in_err = "strategy changed" in err
+ strategy_changed = strategy_changed_in_out or \
+ strategy_changed_in_err
+ if strategy_changed:
+ if strategy_changed_in_out:
+ out = json.dumps({"changed": False,
+ "stdout": out.rstrip("\r\n")})
+ elif strategy_changed_in_err:
+ out = json.dumps({"changed": False,
+ "stderr": err.rstrip("\r\n")})
+ rc = 0
+ changed = False
+ else:
+ out = out.rstrip("\r\n")
+ result = dict(
+ cmd=cmd,
+ stdout=out.rstrip('\r\n'),
+ stderr=err.rstrip('\r\n'),
+ rc=rc,
+ changed=changed,
+ )
+ if strategy_changed:
+ module.exit_json(**result)
+ module.fail_json(msg='non-zero return code', **result)
+
+ if not report:
+ if 'changed' in report_result:
+ # we have the old batch implementation
+ # if not asking for a report, let's just run the batch command
+ changed = report_result['changed']
+ if changed:
+ # Batch prepare the OSD
+ rc, cmd, out, err = exec_command(
+ module, batch(module, container_image))
+ else:
+ # we have the refactored batch, its idempotent so lets just
+ # run it
+ rc, cmd, out, err = exec_command(
+ module, batch(module, container_image))
+ else:
+ cmd = batch_report_cmd
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip('\r\n'),
+ stderr=err.rstrip('\r\n'),
+ changed=changed,
+ )
+
+ if rc != 0:
+ module.fail_json(msg='non-zero return code', **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module
+except ImportError:
+ from module_utils.ca_common import exit_module
+import datetime
+import os
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_volume_simple_activate
+short_description: Activate legacy OSD with ceph-volume
+version_added: "2.8"
+description:
+ - Activate legacy OSD with ceph-volume by providing the JSON file from
+ the scan operation or by passing the OSD ID and OSD FSID.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ path:
+ description:
+      - The OSD metadata JSON file in the /etc/ceph/osd directory; it
+        must exist.
+ required: false
+ osd_id:
+ description:
+ - The legacy OSD ID.
+ required: false
+ osd_fsid:
+ description:
+ - The legacy OSD FSID.
+ required: false
+ osd_all:
+ description:
+ - Activate all legacy OSDs.
+ required: false
+ systemd:
+ description:
+      - Use systemd units during the OSD activation.
+ required: false
+ default: true
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: activate all legacy OSDs
+ ceph_volume_simple_activate:
+ cluster: ceph
+ osd_all: true
+
+- name: activate a legacy OSD via OSD ID and OSD FSID
+ ceph_volume_simple_activate:
+ cluster: ceph
+ osd_id: 3
+ osd_fsid: 0c4a7eca-0c2a-4c12-beff-08a80f064c52
+
+- name: activate a legacy OSD via the JSON file
+ ceph_volume_simple_activate:
+ cluster: ceph
+ path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json
+
+- name: activate a legacy OSD via the JSON file without systemd
+ ceph_volume_simple_activate:
+ cluster: ceph
+ path: /etc/ceph/osd/3-0c4a7eca-0c2a-4c12-beff-08a80f064c52.json
+ systemd: false
+'''
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ path=dict(type='path', required=False),
+ systemd=dict(type='bool', required=False, default=True),
+ osd_id=dict(type='str', required=False),
+ osd_fsid=dict(type='str', required=False),
+ osd_all=dict(type='bool', required=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('osd_all', 'osd_id'),
+ ('osd_all', 'osd_fsid'),
+ ('path', 'osd_id'),
+ ('path', 'osd_fsid'),
+ ],
+ required_together=[
+ ('osd_id', 'osd_fsid')
+ ],
+ required_one_of=[
+ ('path', 'osd_id', 'osd_all'),
+ ('path', 'osd_fsid', 'osd_all'),
+ ],
+ )
+
+ path = module.params.get('path')
+ cluster = module.params.get('cluster')
+ systemd = module.params.get('systemd')
+ osd_id = module.params.get('osd_id')
+ osd_fsid = module.params.get('osd_fsid')
+ osd_all = module.params.get('osd_all')
+
+ if path and not os.path.exists(path):
+ module.fail_json(msg='{} does not exist'.format(path), rc=1)
+
+ startd = datetime.datetime.now()
+
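+    # When both CEPH_CONTAINER_IMAGE and CEPH_CONTAINER_BINARY are set in the
+    # environment, ceph-volume is executed through the container runtime;
+    # otherwise the binary is called directly on the host.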
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ if container_binary and container_image:
+ cmd = [container_binary,
+ 'run', '--rm', '--privileged',
+ '--ipc=host', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '-v', '/run/lvm/:/run/lvm/',
+ '-v', '/run/lock/lvm/:/run/lock/lvm/',
+ '--entrypoint=ceph-volume', container_image]
+ else:
+ cmd = ['ceph-volume']
+
+ cmd.extend(['--cluster', cluster, 'simple', 'activate'])
+
+ if osd_all:
+ cmd.append('--all')
+ else:
+ if path:
+ cmd.extend(['--file', path])
+ else:
+ cmd.extend([osd_id, osd_fsid])
+
+ if not systemd:
+ cmd.append('--no-systemd')
+
+ if module.check_mode:
+ exit_module(
+ module=module,
+ out='',
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ rc, out, err = module.run_command(cmd)
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module
+except ImportError:
+ from module_utils.ca_common import exit_module
+import datetime
+import os
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: ceph_volume_simple_scan
+short_description: Scan legacy OSD with ceph-volume
+version_added: "2.8"
+description:
+  - Scan a legacy OSD with ceph-volume and store the output as a JSON file
+    in the /etc/ceph/osd directory using the {OSD_ID}-{OSD_FSID}.json format.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ path:
+ description:
+ - The OSD directory or metadata partition. The directory or
+ partition must exist.
+ required: false
+ force:
+ description:
+ - Force re-scanning an OSD and overwriting the JSON content.
+ required: false
+ default: false
+ stdout:
+ description:
+      - Do not store the output in a JSON file; print it to stdout instead.
+ required: false
+ default: false
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: scan all running OSDs
+ ceph_volume_simple_scan:
+ cluster: ceph
+
+- name: scan an OSD with the directory
+ ceph_volume_simple_scan:
+ cluster: ceph
+ path: /var/lib/ceph/osd/ceph-3
+
+- name: scan an OSD with the partition
+ ceph_volume_simple_scan:
+ cluster: ceph
+ path: /dev/sdb1
+
+- name: rescan an OSD and print the result on stdout
+ ceph_volume_simple_scan:
+ cluster: ceph
+ path: /dev/nvme0n1p1
+ force: true
+ stdout: true
+'''
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ path=dict(type='path', required=False),
+ force=dict(type='bool', required=False, default=False),
+ stdout=dict(type='bool', required=False, default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params.get('path')
+ cluster = module.params.get('cluster')
+ force = module.params.get('force')
+ stdout = module.params.get('stdout')
+
+ if path and not os.path.exists(path):
+ module.fail_json(msg='{} does not exist'.format(path), rc=1)
+
+ startd = datetime.datetime.now()
+
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ if container_binary and container_image:
+ cmd = [container_binary,
+ 'run', '--rm', '--privileged',
+ '--ipc=host', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '-v', '/run/lvm/:/run/lvm/',
+ '-v', '/run/lock/lvm/:/run/lock/lvm/',
+ '--entrypoint=ceph-volume', container_image]
+ else:
+ cmd = ['ceph-volume']
+
+ cmd.extend(['--cluster', cluster, 'simple', 'scan'])
+
+ if force:
+ cmd.append('--force')
+
+ if stdout:
+ cmd.append('--stdout')
+
+ if path:
+ cmd.append(path)
+
+ if module.check_mode:
+ exit_module(
+ module=module,
+ out='',
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ rc, out, err = module.run_command(cmd)
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module
+except ImportError:
+ from module_utils.ca_common import exit_module
+import datetime
+import json
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: cephadm_adopt
+short_description: Adopt a Ceph cluster with cephadm
+version_added: "2.8"
+description:
+ - Adopt a Ceph cluster with cephadm
+options:
+ name:
+ description:
+ - The ceph daemon name.
+ required: true
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ style:
+ description:
+      - Ceph deployment style.
+ required: false
+ default: legacy
+ image:
+ description:
+ - Ceph container image.
+ required: false
+ docker:
+ description:
+ - Use docker instead of podman.
+ required: false
+ pull:
+ description:
+ - Pull the Ceph container image.
+ required: false
+ default: true
+ firewalld:
+ description:
+ - Manage firewall rules with firewalld.
+ required: false
+ default: true
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: adopt a ceph monitor with cephadm (default values)
+ cephadm_adopt:
+ name: mon.foo
+ style: legacy
+
+- name: adopt a ceph monitor with cephadm (with custom values)
+ cephadm_adopt:
+ name: mon.foo
+ style: legacy
+ image: quay.ceph.io/ceph/daemon-base:latest-master-devel
+ pull: false
+ firewalld: false
+
+- name: adopt a ceph monitor with cephadm with custom image via env var
+ cephadm_adopt:
+ name: mon.foo
+ style: legacy
+ environment:
+ CEPHADM_IMAGE: quay.ceph.io/ceph/daemon-base:latest-master-devel
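+
+# Illustrative sketch only: the docker option comes from this module's
+# argument spec; osd.0 is a placeholder daemon name.
+- name: adopt a ceph osd with cephadm using docker instead of podman
+  cephadm_adopt:
+    name: osd.0
+    style: legacy
+    docker: true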
+'''
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ cluster=dict(type='str', required=False, default='ceph'),
+ style=dict(type='str', required=False, default='legacy'),
+ image=dict(type='str', required=False),
+ docker=dict(type='bool', required=False, default=False),
+ pull=dict(type='bool', required=False, default=True),
+ firewalld=dict(type='bool', required=False, default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params.get('name')
+ cluster = module.params.get('cluster')
+ style = module.params.get('style')
+ docker = module.params.get('docker')
+ image = module.params.get('image')
+ pull = module.params.get('pull')
+ firewalld = module.params.get('firewalld')
+
+ startd = datetime.datetime.now()
+
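+    # List the daemons known to cephadm first: if the requested daemon is
+    # already reported with the cephadm:v1 style, the adoption is skipped.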
+ cmd = ['cephadm', 'ls', '--no-detail']
+
+ if module.check_mode:
+ exit_module(
+ module=module,
+ out='',
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc == 0:
+ if name in [x["name"] for x in json.loads(out) if x["style"] == "cephadm:v1"]: # noqa: E501
+ exit_module(
+ module=module,
+ out='{} is already adopted'.format(name),
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ module.fail_json(msg=err, rc=rc)
+
+ cmd = ['cephadm']
+
+ if docker:
+ cmd.append('--docker')
+
+ if image:
+ cmd.extend(['--image', image])
+
+ cmd.extend(['adopt', '--cluster', cluster, '--name', name, '--style', style]) # noqa: E501
+
+ if not pull:
+ cmd.append('--skip-pull')
+
+ if not firewalld:
+ cmd.append('--skip-firewalld')
+
+ rc, out, err = module.run_command(cmd)
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import exit_module
+except ImportError:
+ from module_utils.ca_common import exit_module
+import datetime
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: cephadm_bootstrap
+short_description: Bootstrap a Ceph cluster via cephadm
+version_added: "2.8"
+description:
+ - Bootstrap a Ceph cluster via cephadm
+options:
+ mon_ip:
+ description:
+ - Ceph monitor IP address.
+ required: true
+ image:
+ description:
+ - Ceph container image.
+ required: false
+ docker:
+ description:
+ - Use docker instead of podman.
+ required: false
+ fsid:
+ description:
+ - Ceph FSID.
+ required: false
+ pull:
+ description:
+ - Pull the Ceph container image.
+ required: false
+ default: true
+ dashboard:
+ description:
+ - Deploy the Ceph dashboard.
+ required: false
+ default: true
+ dashboard_user:
+ description:
+ - Ceph dashboard user.
+ required: false
+ dashboard_password:
+ description:
+ - Ceph dashboard password.
+ required: false
+ monitoring:
+ description:
+ - Deploy the monitoring stack.
+ required: false
+ default: true
+ firewalld:
+ description:
+ - Manage firewall rules with firewalld.
+ required: false
+ default: true
+ allow_overwrite:
+ description:
+      - Allow overwrite of existing --output-* config/keyring/ssh files.
+ required: false
+ default: false
+ registry_url:
+ description:
+ - URL for custom registry.
+ required: false
+ registry_username:
+ description:
+ - Username for custom registry.
+ required: false
+ registry_password:
+ description:
+ - Password for custom registry.
+ required: false
+ registry_json:
+ description:
+ - JSON file with custom registry login info (URL,
+ username, password).
+ required: false
+ ssh_user:
+ description:
+ - SSH user used for cephadm ssh to the hosts.
+ required: false
+ ssh_config:
+ description:
+ - SSH config file path for cephadm ssh client.
+ required: false
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: bootstrap a cluster via cephadm (with default values)
+ cephadm_bootstrap:
+ mon_ip: 192.168.42.1
+
+- name: bootstrap a cluster via cephadm (with custom values)
+ cephadm_bootstrap:
+ mon_ip: 192.168.42.1
+ fsid: 3c9ba63a-c7df-4476-a1e7-317dfc711f82
+ image: quay.ceph.io/ceph/daemon-base:latest-master-devel
+ dashboard: false
+ monitoring: false
+ firewalld: false
+
+- name: bootstrap a cluster via cephadm with custom image via env var
+ cephadm_bootstrap:
+ mon_ip: 192.168.42.1
+ environment:
+ CEPHADM_IMAGE: quay.ceph.io/ceph/daemon-base:latest-master-devel
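+
+# Illustrative sketch only: registry_url/registry_username/registry_password
+# and ssh_user come from this module's argument spec; the values below are
+# placeholders.
+- name: bootstrap a cluster via cephadm with a custom registry and ssh user
+  cephadm_bootstrap:
+    mon_ip: 192.168.42.1
+    registry_url: registry.example.com
+    registry_username: registry-user
+    registry_password: registry-password
+    ssh_user: cephadm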
+'''
+
+RETURN = '''# '''
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mon_ip=dict(type='str', required=True),
+ image=dict(type='str', required=False),
+ docker=dict(type='bool', required=False, default=False),
+ fsid=dict(type='str', required=False),
+ pull=dict(type='bool', required=False, default=True),
+ dashboard=dict(type='bool', required=False, default=True),
+ dashboard_user=dict(type='str', required=False),
+ dashboard_password=dict(type='str', required=False, no_log=True),
+ monitoring=dict(type='bool', required=False, default=True),
+ firewalld=dict(type='bool', required=False, default=True),
+ allow_overwrite=dict(type='bool', required=False, default=False),
+            registry_url=dict(type='str', required=False),
+            registry_username=dict(type='str', required=False),
+            registry_password=dict(type='str', required=False, no_log=True),
+            registry_json=dict(type='path', required=False),
+ ssh_user=dict(type='str', required=False),
+ ssh_config=dict(type='str', required=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('registry_json', 'registry_url'),
+ ('registry_json', 'registry_username'),
+ ('registry_json', 'registry_password'),
+ ],
+ required_together=[
+ ('registry_url', 'registry_username', 'registry_password')
+ ],
+ )
+
+ mon_ip = module.params.get('mon_ip')
+ docker = module.params.get('docker')
+ image = module.params.get('image')
+ fsid = module.params.get('fsid')
+ pull = module.params.get('pull')
+ dashboard = module.params.get('dashboard')
+ dashboard_user = module.params.get('dashboard_user')
+ dashboard_password = module.params.get('dashboard_password')
+ monitoring = module.params.get('monitoring')
+ firewalld = module.params.get('firewalld')
+ allow_overwrite = module.params.get('allow_overwrite')
+ registry_url = module.params.get('registry_url')
+ registry_username = module.params.get('registry_username')
+ registry_password = module.params.get('registry_password')
+ registry_json = module.params.get('registry_json')
+ ssh_user = module.params.get('ssh_user')
+ ssh_config = module.params.get('ssh_config')
+
+ startd = datetime.datetime.now()
+
+ cmd = ['cephadm']
+
+ if docker:
+ cmd.append('--docker')
+
+ if image:
+ cmd.extend(['--image', image])
+
+ cmd.extend(['bootstrap', '--mon-ip', mon_ip])
+
+ if fsid:
+ cmd.extend(['--fsid', fsid])
+
+ if not pull:
+ cmd.append('--skip-pull')
+
+ if dashboard:
+ if dashboard_user:
+ cmd.extend(['--initial-dashboard-user', dashboard_user])
+ if dashboard_password:
+ cmd.extend(['--initial-dashboard-password', dashboard_password])
+ else:
+ cmd.append('--skip-dashboard')
+
+ if not monitoring:
+ cmd.append('--skip-monitoring-stack')
+
+ if not firewalld:
+ cmd.append('--skip-firewalld')
+
+ if allow_overwrite:
+ cmd.append('--allow-overwrite')
+
+ if registry_url and registry_username and registry_password:
+ cmd.extend(['--registry-url', registry_url,
+ '--registry-username', registry_username,
+ '--registry-password', registry_password])
+
+ if registry_json:
+ cmd.extend(['--registry-json', registry_json])
+
+ if ssh_user:
+ cmd.extend(['--ssh-user', ssh_user])
+
+ if ssh_config:
+ cmd.extend(['--ssh-config', ssh_config])
+
+ if module.check_mode:
+ exit_module(
+ module=module,
+ out='',
+ rc=0,
+ cmd=cmd,
+ err='',
+ startd=startd,
+ changed=False
+ )
+ else:
+ rc, out, err = module.run_command(cmd)
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# this is a virtual module that is entirely implemented server side
+
+DOCUMENTATION = """
+---
+module: config_template
+version_added: 1.9.2
+short_description: Renders template files providing a create/update override interface
+description:
+  - The module contains the template functionality with the ability to override items
+    in config, in transit, through the use of a simple dictionary without having to
+    write out various temp files on target machines. The module renders all of the
+    potential jinja a user could provide in both the template file and in the override
+    dictionary, which is ideal for deployers who may have lots of different configs
+    using a similar code base.
+  - The module is an extension of the **copy** module and all of the attributes that
+    can be set there are available to be set here.
+options:
+ src:
+ description:
+ - Path of a Jinja2 formatted template on the local server. This can be a relative
+ or absolute path.
+ required: true
+ default: null
+ dest:
+ description:
+ - Location to render the template to on the remote machine.
+ required: true
+ default: null
+ config_overrides:
+ description:
+ - A dictionary used to update or override items within a configuration template.
+ The dictionary data structure may be nested. If the target config file is an ini
+ file the nested keys in the ``config_overrides`` will be used as section
+ headers.
+ config_type:
+ description:
+ - A string value describing the target config type.
+ choices:
+ - ini
+ - json
+ - yaml
+author: Kevin Carter
+"""
+
+EXAMPLES = """
+ - name: run config template ini
+ config_template:
+ src: templates/test.ini.j2
+ dest: /tmp/test.ini
+ config_overrides: {}
+ config_type: ini
+
+ - name: run config template json
+ config_template:
+ src: templates/test.json.j2
+ dest: /tmp/test.json
+ config_overrides: {}
+ config_type: json
+
+ - name: run config template yaml
+ config_template:
+ src: templates/test.yaml.j2
+ dest: /tmp/test.yaml
+ config_overrides: {}
+ config_type: yaml
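+
+  # Illustrative sketch only: with config_type ini, nested keys in
+  # config_overrides become section headers (per the option description
+  # above); the section and key names here are placeholders.
+  - name: run config template ini with nested overrides
+    config_template:
+      src: templates/test.ini.j2
+      dest: /tmp/test.ini
+      config_overrides:
+        DEFAULT:
+          debug: True
+      config_type: ini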
+"""
--- /dev/null
+#!/usr/bin/env python
+
+__author__ = 'pcuzner@redhat.com'
+
+DOCUMENTATION = """
+---
+module: igw_client
+short_description: Manage iscsi gateway client definitions
+description:
+ - This module calls the 'client' configuration management module installed
+ on the iscsi gateway node to handle the definition of iscsi clients on the
+ gateway(s). This definition will setup iscsi authentication (e.g. chap),
+ and mask the required rbd images to the client.
+
+ The 'client' configuration module is provided by ceph-iscsi-config
+ rpm which is installed on the gateway nodes.
+
+ To support module debugging, this module logs to
+ /var/log/ansible-module-igw_config.log on the target machine(s).
+
+options:
+ client_iqn:
+ description:
+ - iqn of the client machine which should be connected or removed from the
+ iscsi gateway environment
+ required: true
+
+ image_list:
+ description:
+ - comma separated string providing the rbd images that this
+ client definition should have. The rbd images provided must use the
+ following format <pool_name>.<rbd_image_name>
+ e.g. rbd.disk1,rbd.disk2
+ required: true
+
+ chap:
+ description:
+ - chap credentials for the client to authenticate to the gateways
+      to gain access to the exported rbds (LUNs). The credentials are a string
+ value of the form 'username/password'. The iscsi client must then use
+ these settings to gain access to any LUN resources.
+ required: true
+
+ state:
+ description:
+ - desired state for this client - absent or present
+ required: true
+
+requirements: ['ceph-iscsi-config']
+
+author:
+ - 'Paul Cuzner'
+
+"""
+
+import os # noqa: E402
+import logging # noqa: E402
+from logging.handlers import RotatingFileHandler # noqa: E402
+from ansible.module_utils.basic import * # noqa: E402,F403
+
+from ceph_iscsi_config.client import GWClient # noqa: E402
+import ceph_iscsi_config.settings as settings # noqa: E402
+
+
+# the main function is called ansible_main to allow the call stack
+# to be checked to determine whether the call to the ceph_iscsi_config
+# modules is from ansible or not
+def ansible_main():
+
+ fields = {
+ "client_iqn": {"required": True, "type": "str"},
+ "image_list": {"required": True, "type": "str"},
+ "chap": {"required": True, "type": "str"},
+ "state": {
+ "required": True,
+ "choices": ['present', 'absent'],
+ "type": "str"
+ },
+ }
+
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
+ supports_check_mode=False)
+
+ client_iqn = module.params['client_iqn']
+
+ if module.params['image_list']:
+ image_list = module.params['image_list'].split(',')
+ else:
+ image_list = []
+
+ chap = module.params['chap']
+ desired_state = module.params['state']
+
+ logger.info("START - Client configuration started : {}".format(client_iqn))
+
+ # The client is defined using the GWClient class. This class handles
+ # client attribute updates, rados configuration object updates and LIO
+ # settings. Since the logic is external to this custom module, clients
+ # can be created/deleted by other methods in the same manner.
+ client = GWClient(logger, client_iqn, image_list, chap)
+ if client.error:
+ module.fail_json(msg=client.error_msg)
+
+ client.manage(desired_state)
+ if client.error:
+ module.fail_json(msg=client.error_msg)
+
+ logger.info("END - Client configuration complete - {} "
+ "changes made".format(client.change_count))
+
+ changes_made = True if client.change_count > 0 else False
+
+ module.exit_json(changed=changes_made,
+ meta={"msg": "Client definition completed {} "
+ "changes made".format(client.change_count)})
+
+
+if __name__ == '__main__':
+
+ module_name = os.path.basename(__file__).replace('ansible_module_', '')
+ logger = logging.getLogger(os.path.basename(module_name))
+ logger.setLevel(logging.DEBUG)
+ handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
+ maxBytes=5242880,
+ backupCount=7)
+ log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
+ '%(message)s')
+ handler.setFormatter(log_fmt)
+ logger.addHandler(handler)
+
+ # initialise global variables used by all called modules
+ # e.g. ceph conffile, keyring etc
+ settings.init()
+
+ ansible_main()
--- /dev/null
+#!/usr/bin/env python
+__author__ = 'pcuzner@redhat.com'
+
+
+DOCUMENTATION = """
+---
+module: igw_gateway
+short_description: Manage the iscsi gateway definition
+description:
+ - This module calls the 'gateway' configuration management module installed
+ on the iscsi gateway node(s) to handle the definition of iscsi gateways.
+    The module will configure:
+ * the iscsi target and target portal group (TPG)
+ * rbd maps to the gateway and registration of those rbds as LUNs to the
+ kernels LIO subsystem
+
+ The actual configuration modules are provided by ceph-iscsi-config rpm
+ which is installed on the gateway nodes.
+
+ To support module debugging, this module logs to
+ /var/log/ansible-module-igw_config.log on the target machine(s).
+
+options:
+ gateway_iqn:
+ description:
+ - iqn that all gateway nodes will use to present a common system image
+ name to iscsi clients
+ required: true
+
+ gateway_ip_list:
+ description:
+ - comma separated string providing the IP addresses that will be used
+ as iSCSI portal IPs to accept iscsi client connections. Each IP address
+ should equate to an IP on a gateway node - typically dedicated to iscsi
+ traffic. The order of the IP addresses determines the TPG sequence
+ within the target definition - so once defined, new gateways can be
+ added but *must* be added to the end of this list to preserve the tpg
+ sequence
+
+ e.g. 192.168.122.101,192.168.122.103
+ required: true
+
+ mode:
+ description:
+ - mode in which to run the gateway module. Two modes are supported
+ target ... define the iscsi target iqn, tpg's and portals
+ map ...... map luns to the tpg's, and also define the ALUA path setting
+ for each LUN (activeOptimized/activenonoptimized)
+ required: true
+
+
+requirements: ['ceph-iscsi-config']
+
+author:
+ - 'Paul Cuzner'
+
+"""
+
+import os # noqa: E402
+import logging # noqa: E402
+
+from logging.handlers import RotatingFileHandler # noqa: E402
+from ansible.module_utils.basic import * # noqa: E402,F403
+
+import ceph_iscsi_config.settings as settings # noqa: E402
+from ceph_iscsi_config.common import Config # noqa: E402
+
+from ceph_iscsi_config.gateway import GWTarget # noqa: E402
+from ceph_iscsi_config.utils import valid_ip # noqa: E402
+
+
+# the main function is called ansible_main to allow the call stack
+# to be checked to determine whether the call to the ceph_iscsi_config
+# modules is from ansible or not
+def ansible_main():
+ # Configures the gateway on the host. All images defined are added to
+ # the default tpg for later allocation to clients
+ fields = {"gateway_iqn": {"required": True, "type": "str"},
+ "gateway_ip_list": {"required": True}, # "type": "list"},
+ "mode": {
+ "required": True,
+ "choices": ['target', 'map']
+ }
+ }
+
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
+ supports_check_mode=False)
+
+ cfg = Config(logger)
+ if cfg.config['version'] > 3:
+ module.fail_json(msg="Unsupported iscsigws.yml/iscsi-gws.yml setting "
+ "detected. Remove depreciated iSCSI target, LUN, "
+ "client, and gateway settings from "
+ "iscsigws.yml/iscsi-gws.yml. See "
+ "iscsigws.yml.sample for list of supported "
+ "settings")
+
+ gateway_iqn = module.params['gateway_iqn']
+ gateway_ip_list = module.params['gateway_ip_list'].split(',')
+ mode = module.params['mode']
+
+ if not valid_ip(gateway_ip_list):
+ module.fail_json(msg="Invalid gateway IP address(es) provided - port "
+ "22 check failed ({})".format(gateway_ip_list))
+
+ logger.info("START - GATEWAY configuration started - mode {}".format(mode))
+
+ gateway = GWTarget(logger, gateway_iqn, gateway_ip_list)
+ if gateway.error:
+ logger.critical("(ansible_main) Gateway init failed - "
+ "{}".format(gateway.error_msg))
+ module.fail_json(msg="iSCSI gateway initialisation failed "
+ "({})".format(gateway.error_msg))
+
+ gateway.manage(mode)
+
+ if gateway.error:
+ logger.critical("(main) Gateway creation or load failed, "
+ "unable to continue")
+ module.fail_json(msg="iSCSI gateway creation/load failure "
+ "({})".format(gateway.error_msg))
+
+ logger.info("END - GATEWAY configuration complete")
+ module.exit_json(changed=gateway.changes_made,
+ meta={"msg": "Gateway setup complete"})
+
+
+if __name__ == '__main__':
+
+ module_name = os.path.basename(__file__).replace('ansible_module_', '')
+ logger = logging.getLogger(os.path.basename(module_name))
+ logger.setLevel(logging.DEBUG)
+ handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
+ maxBytes=5242880,
+ backupCount=7)
+ log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
+ '%(message)s')
+ handler.setFormatter(log_fmt)
+ logger.addHandler(handler)
+
+ # initialise global variables used by all called modules
+ # e.g. ceph conffile, keyring etc
+ settings.init()
+
+ ansible_main()
--- /dev/null
+#!/usr/bin/env python
+
+__author__ = 'pcuzner@redhat.com'
+
+DOCUMENTATION = """
+---
+module: igw_lun
+short_description: Manage ceph rbd images to present as iscsi LUNs to clients
+description:
+ - This module calls the 'lun' configuration management module installed
+ on the iscsi gateway node(s). The lun module handles the creation and resize # noqa: E501
+ of rbd images, and then maps these rbd devices to the gateway node(s) to be
+ exposed through the kernel's LIO target.
+
+ To support module debugging, this module logs to /var/log/ansible-module-igw_config.log # noqa: E501
+ on the target machine(s).
+
+options:
+ pool:
+ description:
+ - The ceph pool where the image should exist or be created in.
+
+ NOTE - The pool *must* exist prior to the Ansible run.
+
+ required: true
+
+ image:
+ description:
+ - this is the rbd image name to create/resize - if the rbd does not exist it
+ is created for you with the settings optimised for exporting over iscsi.
+ required: true
+
+ size:
+ description:
+ - The size of the rbd image to create/resize. The size is numeric suffixed by
+ G or T (GB or TB). Increasing the size of a LUN is supported, but if a size
+      is provided that is smaller than the current size, the request is simply ignored.
+
+ e.g. 100G
+ required: true
+
+ host:
+ description:
+ - the host variable defines the name of the gateway node that will be
+ the allocation host for this rbd image. RBD creation and resize can
+ only be performed by one gateway, the other gateways in the
+ configuration will wait for the operation to complete.
+ required: true
+
+ features:
+ description:
+ - placeholder to potentially allow different rbd features to be set at
+ allocation time by Ansible. NOT CURRENTLY USED
+ required: false
+
+ state:
+ description:
+ - desired state for this LUN - absent or present. For a state='absent'
+ request, the lun module will verify that the rbd image is not allocated to
+ a client. As long as the rbd image is not in use, the LUN definition will be
+ removed from LIO, unmapped from all gateways AND DELETED.
+
+ USE WITH CARE!
+ required: true
+
+requirements: ['ceph-iscsi-config']
+
+author:
+ - 'Paul Cuzner'
+
+"""
+
+import os # noqa: E402
+import logging # noqa: E402
+from logging.handlers import RotatingFileHandler # noqa: E402
+
+from ansible.module_utils.basic import * # noqa: E402,F403
+
+from ceph_iscsi_config.lun import LUN # noqa: E402
+from ceph_iscsi_config.utils import valid_size # noqa: E402
+import ceph_iscsi_config.settings as settings # noqa: E402
+
+
+# the main function is called ansible_main to allow the call stack
+# to be checked to determine whether the call to the ceph_iscsi_config
+# modules is from ansible or not
+def ansible_main():
+
+    # Define the fields needed to create/map rbds to the host(s)
+ # NB. features and state are reserved/unused
+ fields = {
+ "pool": {"required": False, "default": "rbd", "type": "str"},
+ "image": {"required": True, "type": "str"},
+ "size": {"required": True, "type": "str"},
+ "host": {"required": True, "type": "str"},
+ "features": {"required": False, "type": "str"},
+ "state": {
+ "required": False,
+ "default": "present",
+ "choices": ['present', 'absent'],
+ "type": "str"
+ },
+ }
+
+ # not supporting check mode currently
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
+ supports_check_mode=False)
+
+ pool = module.params["pool"]
+ image = module.params['image']
+ size = module.params['size']
+ allocating_host = module.params['host']
+ desired_state = module.params['state']
+
+ ################################################
+ # Validate the parameters passed from Ansible #
+ ################################################
+ if not valid_size(size):
+ logger.critical("image '{}' has an invalid size specification '{}' "
+ "in the ansible configuration".format(image,
+ size))
+ module.fail_json(msg="(main) Unable to use the size parameter '{}' "
+ "for image '{}' from the playbook - "
+ "must be a number suffixed by M,G "
+ "or T".format(size,
+ image))
+
+ # define a lun object and perform some initial parameter validation
+ lun = LUN(logger, pool, image, size, allocating_host)
+ if lun.error:
+ module.fail_json(msg=lun.error_msg)
+
+ logger.info("START - LUN configuration started for {}/{}".format(pool,
+ image))
+
+ # attempt to create/allocate the LUN for LIO
+ lun.manage(desired_state)
+ if lun.error:
+ module.fail_json(msg=lun.error_msg)
+
+ if lun.num_changes == 0:
+ logger.info("END - No changes needed")
+ else:
+ logger.info("END - {} configuration changes "
+ "made".format(lun.num_changes))
+
+ module.exit_json(changed=(lun.num_changes > 0),
+ meta={"msg": "Configuration updated"})
+
+
+if __name__ == '__main__':
+
+ module_name = os.path.basename(__file__).replace('ansible_module_', '')
+ logger = logging.getLogger(os.path.basename(module_name))
+ logger.setLevel(logging.DEBUG)
+ handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
+ maxBytes=5242880,
+ backupCount=7)
+ log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
+ '%(message)s')
+ handler.setFormatter(log_fmt)
+ logger.addHandler(handler)
+
+ # initialise global variables used by all called modules
+ # e.g. ceph conffile, keyring etc
+ settings.init()
+
+ ansible_main()
--- /dev/null
+#!/usr/bin/env python
+
+DOCUMENTATION = """
+---
+module: igw_purge
+short_description: Provide a purge capability to remove an iSCSI gateway environment
+description:
+ - This module handles the removal of a gateway configuration from a ceph
+ environment.
+ The playbook that calls this module prompts the user for the type of purge
+ to perform.
+  The purge options are:
+ all ... purge all LIO configuration *and* delete all defined rbd images
+ lio ... purge only the LIO configuration (rbd's are left intact)
+
+ USE WITH CAUTION
+
+ To support module debugging, this module logs to
+  /var/log/ansible-module-igw_config.log on the target machine(s).
+
+options:
+ mode:
+ description:
+ - the mode defines the type of purge requested
+ gateway ... remove the LIO configuration only
+ disks ... remove the rbd disks defined to the gateway
+ required: true
+
+requirements: ['ceph-iscsi-config', 'python-rtslib']
+
+author:
+ - 'Paul Cuzner'
+
+"""
+
+import os # noqa: E402
+import logging # noqa: E402
+import socket # noqa: E402,F401
+import rados # noqa: E402
+import rbd # noqa: E402
+
+from logging.handlers import RotatingFileHandler # noqa: E402
+from ansible.module_utils.basic import * # noqa: E402,F403
+
+import ceph_iscsi_config.settings as settings # noqa: E402
+from ceph_iscsi_config.common import Config # noqa: E402
+from ceph_iscsi_config.lun import RBDDev # noqa: E402
+
+__author__ = 'pcuzner@redhat.com'
+
+
+def delete_images(cfg):
+ changes_made = False
+
+ for disk_name, disk in cfg.config['disks'].items():
+ image = disk['image']
+
+ logger.debug("Deleing image {}".format(image))
+
+ backstore = disk.get('backstore')
+ if backstore is None:
+ # ceph iscsi-config based.
+ rbd_dev = RBDDev(image, 0, disk['pool'])
+ else:
+ # ceph-iscsi based.
+ rbd_dev = RBDDev(image, 0, backstore, disk['pool'])
+
+ try:
+ rbd_dev.delete()
+ except rbd.ImageNotFound:
+            # Just log and ignore. If we crashed while purging we could have
+            # deleted the image but not removed it from the config
+ logger.debug("Image already deleted.")
+ except rbd.ImageHasSnapshots:
+ logger.error("Image still has snapshots.")
+            # Older versions of ceph-iscsi-config do not have an error_msg
+ # string.
+ if not rbd_dev.error_msg:
+ rbd_dev.error_msg = "Image has snapshots."
+
+ if rbd_dev.error:
+ if rbd_dev.error_msg:
+ logger.error("Could not remove {}. Error: {}. Manually run the " # noqa: E501
+ "rbd command line tool to delete.".
+ format(image, rbd_dev.error_msg))
+ else:
+ logger.error("Could not remove {}. Manually run the rbd "
+ "command line tool to delete.".format(image))
+ else:
+ changes_made = True
+
+ return changes_made
+
+
+def delete_gateway_config(cfg, module):
+ ioctx = cfg._open_ioctx()
+ try:
+ size, mtime = ioctx.stat(cfg.config_name)
+ except rados.ObjectNotFound:
+ logger.debug("gateway.conf already removed.")
+ return False
+
+ try:
+ ioctx.remove_object(cfg.config_name)
+ except Exception as err:
+ module.fail_json(msg="Gateway config object failed: {}".format(err))
+
+ return True
+
+
+def ansible_main():
+
+ fields = {"mode": {"required": True,
+ "type": "str",
+ "choices": ["gateway", "disks"]
+ }
+ }
+
+ module = AnsibleModule(argument_spec=fields, # noqa: F405
+ supports_check_mode=False)
+
+ run_mode = module.params['mode']
+ changes_made = False
+
+ logger.info("START - GATEWAY configuration PURGE started, run mode "
+ "is {}".format(run_mode))
+ cfg = Config(logger)
+ #
+ # Purge gateway configuration, if the config has gateways
+ if run_mode == 'gateway':
+ changes_made = delete_gateway_config(cfg, module)
+ elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0:
+ #
+ # Remove the disks on this host, that have been registered in the
+ # config object
+ changes_made = delete_images(cfg)
+
+ logger.info("END - GATEWAY configuration PURGE complete")
+
+ module.exit_json(changed=changes_made,
+ meta={"msg": "Purge of iSCSI settings ({}) "
+ "complete".format(run_mode)})
+
+
+if __name__ == '__main__':
+
+ module_name = os.path.basename(__file__).replace('ansible_module_', '')
+ logger = logging.getLogger(os.path.basename(module_name))
+ logger.setLevel(logging.DEBUG)
+ handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
+ maxBytes=5242880,
+ backupCount=7)
+ log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
+ '%(message)s')
+ handler.setFormatter(log_fmt)
+ logger.addHandler(handler)
+
+ settings.init()
+
+ ansible_main()
--- /dev/null
+# Copyright 2022, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ from ansible.module_utils.ca_common import (
+ exit_module,
+ exec_command,
+ is_containerized,
+ container_exec,
+ )
+except ImportError:
+ from module_utils.ca_common import (
+ exit_module,
+ exec_command,
+ is_containerized,
+ container_exec,
+ )
+import datetime
+import json
+import re
+from enum import IntFlag
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: radosgw_caps
+
+short_description: Manage RADOS Gateway Admin capabilities
+
+version_added: "2.10"
+
+description:
+ - Manage RADOS Gateway capabilities addition and deletion.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ type: str
+ name:
+ description:
+ - name of the RADOS Gateway user (uid).
+ required: true
+ type: str
+ state:
+ description:
+ If 'present' is used, the module will assign capabilities
+ defined in `caps`.
+ If 'absent' is used, the module will remove the capabilities.
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ caps:
+ description:
+ - The set of capabilities to assign or remove.
+ required: true
+ type: list
+ elements: str
+
+author:
+ - Mathias Chapelain <mathias.chapelain@proton.ch>
+"""
+
+EXAMPLES = """
+- name: add users read capabilities to a user
+ radosgw_caps:
+ name: foo
+ state: present
+ caps:
+ - users=read
+
+- name: add users read write and all buckets capabilities
+ radosgw_caps:
+ name: foo
+ state: present
+ caps:
+ - users=read,write
+ - buckets=*
+
+- name: remove usage write capabilities
+ radosgw_caps:
+ name: foo
+ state: absent
+ caps:
+ - usage=write
+"""
+
+RETURN = """
+---
+cmd:
+ description: The radosgw-admin command being run by the module to apply caps settings.
+ returned: always
+ type: str
+start:
+ description: Timestamp of module execution start.
+ returned: always
+ type: str
+end:
+ description: Timestamp of module execution end.
+ returned: always
+ type: str
+delta:
+ description: Time of module execution between start and end.
+ returned: always
+ type: str
+diff:
+ description: Dict containing the user capabilities before and after modifications.
+ returned: always
+ type: dict
+ contains:
+ before:
+ description: Contains user capabilities, json-formatted, as returned by `radosgw-admin user info`.
+ returned: always
+ type: str
+ after:
+ description: Contains user capabilities, json-formatted, as returned by `radosgw-admin caps add/rm`.
+ returned: success
+ type: str
+rc:
+ description: Return code of the module command executed, see `cmd` return value.
+ returned: always
+ type: int
+stdout:
+ description: Output of the executed command.
+ returned: always
+ type: str
+stderr:
+ description: Error output of the executed command.
+ returned: always
+ type: str
+changed:
+ description: Specify if user capabilities has been changed during module execution.
+ returned: always
+ type: bool
+"""
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+ """
+    Generate the radosgw-admin command prefix
+ """
+ if container_image:
+ cmd = container_exec("radosgw-admin", container_image)
+ else:
+ cmd = ["radosgw-admin"]
+
+ return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+ """
+    Generate the 'radosgw-admin' command line to execute
+ """
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ base_cmd = ["--cluster", cluster, "caps"]
+
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def add_caps(module, container_image=None):
+ """
+ Add capabilities
+ """
+
+ cluster = module.params.get("cluster")
+ name = module.params.get("name")
+ caps = module.params.get("caps")
+
+ args = ["add", "--uid=" + name, "--caps=" + ";".join(caps)]
+
+ cmd = generate_radosgw_cmd(
+ cluster=cluster, args=args, container_image=container_image
+ )
+
+ return cmd
+
+
+def remove_caps(module, container_image=None):
+ """
+ Remove capabilities
+ """
+
+ cluster = module.params.get("cluster")
+ name = module.params.get("name")
+ caps = module.params.get("caps")
+
+ args = ["rm", "--uid=" + name, "--caps=" + ";".join(caps)]
+
+ cmd = generate_radosgw_cmd(
+ cluster=cluster, args=args, container_image=container_image
+ )
+
+ return cmd
+
+
+def get_user(module, container_image=None):
+ """
+ Get existing user
+ """
+
+ cluster = module.params.get("cluster")
+ name = module.params.get("name")
+
+ args = ["info", "--uid=" + name, "--format=json"]
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ base_cmd = ["--cluster", cluster, "user"]
+
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+class RGWUserCaps(IntFlag):
+ INVALID = 0x0
+ READ = 0x1
+ WRITE = 0x2
+ ALL = READ | WRITE
+
+
+def perm_string_to_flag(perm):
+ splitted = re.split(",|=| |\t", perm)
+ if ("read" in splitted and "write" in splitted) or "*" in splitted:
+ return RGWUserCaps.ALL
+ elif "read" in splitted:
+ return RGWUserCaps.READ
+ elif "write" in splitted:
+ return RGWUserCaps.WRITE
+ return RGWUserCaps.INVALID
+
+
+def perm_flag_to_string(perm):
+ if perm == RGWUserCaps.ALL:
+ return "*"
+ elif perm == RGWUserCaps.READ:
+ return "read"
+ elif perm == RGWUserCaps.WRITE:
+ return "write"
+ else:
+ return "invalid"
+
+
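+# Compute the expected caps list locally (used in check mode): merge or
+# subtract the requested permission flags against the user's current caps
+# without calling radosgw-admin.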
+def params_to_caps_output(current_caps, params, deletion=False):
+ out_caps = current_caps
+ for param in params:
+ splitted = param.split("=", maxsplit=1)
+ cap = splitted[0]
+
+ new_perm = perm_string_to_flag(splitted[1])
+ current = next((item for item in out_caps if item["type"] == cap), None)
+
+ if not current:
+ if not deletion:
+ out_caps.append(dict(type=cap, perm=perm_flag_to_string(new_perm)))
+ continue
+
+ current_perm = perm_string_to_flag(current["perm"])
+
+ new_perm = current_perm & ~new_perm if deletion else new_perm | current_perm
+
+ if new_perm == 0x0:
+ out_caps.remove(current)
+
+ current["perm"] = perm_flag_to_string(new_perm)
+
+ return out_caps
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type="str", required=False, default="ceph"),
+ name=dict(type="str", required=True),
+ state=dict(
+ type="str", required=False, choices=["present", "absent"], default="present"
+ ),
+ caps=dict(type="list", required=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get("name")
+ state = module.params.get("state")
+ caps = module.params.get("caps")
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ diff = dict(before="", after="")
+
+ # get user infos for diff
+ rc, cmd, out, err = exec_command(
+ module, get_user(module, container_image=container_image)
+ )
+
+ if rc == 0:
+ before_user = json.loads(out)
+ before_caps = sorted(before_user["caps"], key=lambda d: d["type"])
+ diff["before"] = json.dumps(before_caps, indent=4)
+
+ out = ""
+ err = ""
+
+ if state == "present":
+ cmd = add_caps(module, container_image=container_image)
+ elif state == "absent":
+ cmd = remove_caps(module, container_image=container_image)
+
+ if not module.check_mode:
+ rc, cmd, out, err = exec_command(module, cmd)
+ else:
+ out_caps = params_to_caps_output(
+ before_user["caps"], caps, deletion=(state == "absent")
+ )
+ out = json.dumps(dict(caps=out_caps))
+
+ if rc == 0:
+ after_user = json.loads(out)["caps"]
+ after_user = sorted(after_user, key=lambda d: d["type"])
+ diff["after"] = json.dumps(after_user, indent=4)
+ changed = diff["before"] != diff["after"]
+ else:
+ out = "User {} doesn't exist".format(name)
+
+ exit_module(
+ module=module,
+ out=out,
+ rc=rc,
+ cmd=cmd,
+ err=err,
+ startd=startd,
+ changed=changed,
+ diff=diff,
+ )
+
+
+def main():
+ run_module()
+
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+import datetime
+import os
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: radosgw_realm
+
+short_description: Manage RADOS Gateway Realm
+
+version_added: "2.8"
+
+description:
+ - Manage RADOS Gateway realm(s) creation, deletion and updates.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the RADOS Gateway realm.
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a realm if it doesn't
+ exist or update it if it already exists.
+ If 'absent' is used, the module will simply delete the realm.
+ If 'info' is used, the module will return all details about the
+ existing realm (json formatted).
+ required: false
+ choices: ['present', 'absent', 'info']
+ default: present
+ default:
+ description:
+ - set the default flag on the realm.
+ required: false
+ default: false
+
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway default realm
+ radosgw_realm:
+ name: foo
+ default: true
+
+- name: get a RADOS Gateway realm information
+ radosgw_realm:
+ name: foo
+ state: info
+
+- name: delete a RADOS Gateway realm
+ radosgw_realm:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image):
+ '''
+ Build the docker CLI to run a command inside a container
+ '''
+
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ command_exec = [container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + binary, container_image]
+ return command_exec
+
+
+def is_containerized():
+ '''
+ Check if we are running on a containerized cluster
+ '''
+
+ if 'CEPH_CONTAINER_IMAGE' in os.environ:
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ else:
+ container_image = None
+
+ return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+ '''
+    Generate the radosgw-admin command prefix
+ '''
+ if container_image:
+ cmd = container_exec('radosgw-admin', container_image)
+ else:
+ cmd = ['radosgw-admin']
+
+ return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+ '''
+    Generate the 'radosgw-admin' command line to execute
+ '''
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ base_cmd = [
+ '--cluster',
+ cluster,
+ 'realm'
+ ]
+
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def exec_commands(module, cmd):
+ '''
+ Execute command(s)
+ '''
+
+ rc, out, err = module.run_command(cmd)
+
+ return rc, cmd, out, err
+
+
+def create_realm(module, container_image=None):
+ '''
+ Create a new realm
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ default = module.params.get('default', False)
+
+ args = ['create', '--rgw-realm=' + name]
+
+ if default:
+ args.append('--default')
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_realm(module, container_image=None):
+ '''
+ Get existing realm
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['get', '--rgw-realm=' + name, '--format=json']
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def remove_realm(module, container_image=None):
+ '''
+ Remove a realm
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+
+ args = ['delete', '--rgw-realm=' + name]
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False):
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=changed,
+ )
+ module.exit_json(**result)
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501
+ default=dict(type='bool', required=False, default=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501
+ if rc != 0:
+ rc, cmd, out, err = exec_commands(module, create_realm(module, container_image=container_image)) # noqa: E501
+ changed = True
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_commands(module, remove_realm(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc = 0
+ out = "Realm {} doesn't exist".format(name)
+
+ elif state == "info":
+ rc, cmd, out, err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+import datetime
+import json
+import os
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: radosgw_user
+
+short_description: Manage RADOS Gateway User
+
+version_added: "2.8"
+
+description:
+ - Manage RADOS Gateway user(s) creation, deletion and updates.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the RADOS Gateway user (uid).
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a user if it doesn't
+        exist or updates it if it already exists.
+ If 'absent' is used, the module will simply delete the user.
+ If 'info' is used, the module will return all details about the
+ existing user (json formatted).
+ required: false
+ choices: ['present', 'absent', 'info']
+ default: present
+ display_name:
+ description:
+ - set the display name of the user.
+ required: false
+ default: None
+ email:
+ description:
+ - set the email of the user.
+ required: false
+ default: None
+ access_key:
+ description:
+ - set the S3 access key of the user.
+ required: false
+ default: None
+ secret_key:
+ description:
+ - set the S3 secret key of the user.
+ required: false
+ default: None
+ realm:
+ description:
+ - set the realm of the user.
+ required: false
+ default: None
+ zonegroup:
+ description:
+ - set the zonegroup of the user.
+ required: false
+ default: None
+ zone:
+ description:
+ - set the zone of the user.
+ required: false
+ default: None
+ system:
+ description:
+ - set the system flag on the user.
+ required: false
+ default: false
+ admin:
+ description:
+ - set the admin flag on the user.
+ required: false
+ default: false
+
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway system user
+ radosgw_user:
+ name: foo
+ system: true
+
+- name: modify a RADOS Gateway user
+ radosgw_user:
+ name: foo
+ email: foo@bar.io
+ access_key: LbwDPp2BBo2Sdlts89Um
+ secret_key: FavL6ueQWcWuWn0YXyQ3TnJ3mT3Uj5SGVHCUXC5K
+ state: present
+
+- name: get a RADOS Gateway user information
+ radosgw_user:
+ name: foo
+ state: info
+
+- name: delete a RADOS Gateway user
+ radosgw_user:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image):
+ '''
+ Build the docker CLI to run a command inside a container
+ '''
+
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ command_exec = [container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + binary, container_image]
+ return command_exec
+
+
+def is_containerized():
+ '''
+ Check if we are running on a containerized cluster
+ '''
+
+ if 'CEPH_CONTAINER_IMAGE' in os.environ:
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ else:
+ container_image = None
+
+ return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+ '''
+    Generate radosgw-admin prefix command
+ '''
+ if container_image:
+ cmd = container_exec('radosgw-admin', container_image)
+ else:
+ cmd = ['radosgw-admin']
+
+ return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+ '''
+    Generate 'radosgw-admin' command line to execute
+ '''
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ base_cmd = [
+ '--cluster',
+ cluster,
+ 'user'
+ ]
+
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def exec_commands(module, cmd):
+ '''
+ Execute command(s)
+ '''
+
+ rc, out, err = module.run_command(cmd)
+
+ return rc, cmd, out, err
+
+
+def create_user(module, container_image=None):
+ '''
+ Create a new user
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ display_name = module.params.get('display_name')
+ if not display_name:
+ display_name = name
+ email = module.params.get('email', None)
+ access_key = module.params.get('access_key', None)
+ secret_key = module.params.get('secret_key', None)
+ realm = module.params.get('realm', None)
+ zonegroup = module.params.get('zonegroup', None)
+ zone = module.params.get('zone', None)
+ system = module.params.get('system', False)
+ admin = module.params.get('admin', False)
+
+ args = ['create', '--uid=' + name, '--display_name=' + display_name]
+
+ if email:
+ args.extend(['--email=' + email])
+
+ if access_key:
+ args.extend(['--access-key=' + access_key])
+
+ if secret_key:
+ args.extend(['--secret-key=' + secret_key])
+
+ if realm:
+ args.extend(['--rgw-realm=' + realm])
+
+ if zonegroup:
+ args.extend(['--rgw-zonegroup=' + zonegroup])
+
+ if zone:
+ args.extend(['--rgw-zone=' + zone])
+
+ if system:
+ args.append('--system')
+
+ if admin:
+ args.append('--admin')
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def modify_user(module, container_image=None):
+ '''
+ Modify an existing user
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ display_name = module.params.get('display_name')
+ if not display_name:
+ display_name = name
+ email = module.params.get('email', None)
+ access_key = module.params.get('access_key', None)
+ secret_key = module.params.get('secret_key', None)
+ realm = module.params.get('realm', None)
+ zonegroup = module.params.get('zonegroup', None)
+ zone = module.params.get('zone', None)
+ system = module.params.get('system', False)
+ admin = module.params.get('admin', False)
+
+ args = ['modify', '--uid=' + name]
+
+ if display_name:
+ args.extend(['--display_name=' + display_name])
+
+ if email:
+ args.extend(['--email=' + email])
+
+ if access_key:
+ args.extend(['--access-key=' + access_key])
+
+ if secret_key:
+ args.extend(['--secret-key=' + secret_key])
+
+ if realm:
+ args.extend(['--rgw-realm=' + realm])
+
+ if zonegroup:
+ args.extend(['--rgw-zonegroup=' + zonegroup])
+
+ if zone:
+ args.extend(['--rgw-zone=' + zone])
+
+ if system:
+ args.append('--system')
+
+ if admin:
+ args.append('--admin')
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_user(module, container_image=None):
+ '''
+ Get existing user
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm', None)
+ zonegroup = module.params.get('zonegroup', None)
+ zone = module.params.get('zone', None)
+
+ args = ['info', '--uid=' + name, '--format=json']
+
+ if realm:
+ args.extend(['--rgw-realm=' + realm])
+
+ if zonegroup:
+ args.extend(['--rgw-zonegroup=' + zonegroup])
+
+ if zone:
+ args.extend(['--rgw-zone=' + zone])
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def remove_user(module, container_image=None):
+ '''
+ Remove a user
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm', None)
+ zonegroup = module.params.get('zonegroup', None)
+ zone = module.params.get('zone', None)
+
+ args = ['rm', '--uid=' + name]
+
+ if realm:
+ args.extend(['--rgw-realm=' + realm])
+
+ if zonegroup:
+ args.extend(['--rgw-zonegroup=' + zonegroup])
+
+ if zone:
+ args.extend(['--rgw-zone=' + zone])
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False):
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=changed,
+ )
+ module.exit_json(**result)
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501
+ display_name=dict(type='str', required=False),
+ email=dict(type='str', required=False),
+ access_key=dict(type='str', required=False, no_log=True),
+ secret_key=dict(type='str', required=False, no_log=True),
+ realm=dict(type='str', required=False),
+ zonegroup=dict(type='str', required=False),
+ zone=dict(type='str', required=False),
+ system=dict(type='bool', required=False, default=False),
+ admin=dict(type='bool', required=False, default=False)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ state = module.params.get('state')
+ display_name = module.params.get('display_name')
+ if not display_name:
+ display_name = name
+ email = module.params.get('email')
+ access_key = module.params.get('access_key')
+ secret_key = module.params.get('secret_key')
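+    # The existing user's 'system' and 'admin' flags are reported as the
+    # strings 'true'/'false' in the radosgw-admin JSON output, so normalize
+    # the module booleans to lowercase strings for the comparison below.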
+ system = str(module.params.get('system')).lower()
+ admin = str(module.params.get('admin')).lower()
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
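+            # The user already exists: build a view of the managed fields and
+            # only run 'user modify' when the requested values differ.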
+ user = json.loads(out)
+ current = {
+ 'display_name': user['display_name'],
+ 'system': user.get('system', 'false'),
+ 'admin': user.get('admin', 'false')
+ }
+ asked = {
+ 'display_name': display_name,
+ 'system': system,
+ 'admin': admin
+ }
+ if email:
+ current['email'] = user['email']
+ asked['email'] = email
+ if access_key:
+ current['access_key'] = user['keys'][0]['access_key']
+ asked['access_key'] = access_key
+ if secret_key:
+ current['secret_key'] = user['keys'][0]['secret_key']
+ asked['secret_key'] = secret_key
+
+ if current != asked:
+ rc, cmd, out, err = exec_commands(module, modify_user(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc, cmd, out, err = exec_commands(module, create_user(module, container_image=container_image)) # noqa: E501
+ changed = True
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_commands(module, remove_user(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc = 0
+ out = "User {} doesn't exist".format(name)
+
+ elif state == "info":
+ rc, cmd, out, err = exec_commands(module, get_user(module, container_image=container_image)) # noqa: E501
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import fatal
+except ImportError:
+ from module_utils.ca_common import fatal
+import datetime
+import json
+import os
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: radosgw_zone
+
+short_description: Manage RADOS Gateway Zone
+
+version_added: "2.8"
+
+description:
+ - Manage RADOS Gateway zone(s) creation, deletion and updates.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the RADOS Gateway zone.
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a zone if it doesn't
+        exist or updates it if it already exists.
+ If 'absent' is used, the module will simply delete the zone.
+ If 'info' is used, the module will return all details about the
+ existing zone (json formatted).
+ required: false
+ choices: ['present', 'absent', 'info']
+ default: present
+ realm:
+ description:
+ - name of the RADOS Gateway realm.
+ required: true
+ zonegroup:
+ description:
+ - name of the RADOS Gateway zonegroup.
+ required: true
+ endpoints:
+ description:
+ - endpoints of the RADOS Gateway zone.
+ required: false
+ default: []
+ access_key:
+ description:
+      - set the S3 access key of the zone system user.
+ required: false
+ default: None
+ secret_key:
+ description:
+      - set the S3 secret key of the zone system user.
+ required: false
+ default: None
+ default:
+ description:
+ - set the default flag on the zone.
+ required: false
+ default: false
+ master:
+ description:
+ - set the master flag on the zone.
+ required: false
+ default: false
+
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway default zone
+ radosgw_zone:
+ name: z1
+ realm: foo
+ zonegroup: bar
+ endpoints:
+ - http://192.168.1.10:8080
+ - http://192.168.1.11:8080
+ default: true
+
+- name: get a RADOS Gateway zone information
+ radosgw_zone:
+ name: z1
+ state: info
+
+- name: delete a RADOS Gateway zone
+ radosgw_zone:
+ name: z1
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image):
+ '''
+ Build the docker CLI to run a command inside a container
+ '''
+
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ command_exec = [container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + binary, container_image]
+ return command_exec
+
+
+def is_containerized():
+ '''
+ Check if we are running on a containerized cluster
+ '''
+
+ if 'CEPH_CONTAINER_IMAGE' in os.environ:
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ else:
+ container_image = None
+
+ return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+ '''
+    Generate radosgw-admin prefix command
+ '''
+ if container_image:
+ cmd = container_exec('radosgw-admin', container_image)
+ else:
+ cmd = ['radosgw-admin']
+
+ return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+ '''
+    Generate 'radosgw-admin' command line to execute
+ '''
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ base_cmd = [
+ '--cluster',
+ cluster,
+ 'zone'
+ ]
+
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def exec_commands(module, cmd):
+ '''
+ Execute command(s)
+ '''
+
+ rc, out, err = module.run_command(cmd)
+
+ return rc, cmd, out, err
+
+
+def create_zone(module, container_image=None):
+ '''
+ Create a new zone
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+ zonegroup = module.params.get('zonegroup')
+ endpoints = module.params.get('endpoints')
+ access_key = module.params.get('access_key')
+ secret_key = module.params.get('secret_key')
+ default = module.params.get('default')
+ master = module.params.get('master')
+
+ args = [
+ 'create',
+ '--rgw-realm=' + realm,
+ '--rgw-zonegroup=' + zonegroup,
+ '--rgw-zone=' + name
+ ]
+
+ if endpoints:
+ args.extend(['--endpoints=' + ','.join(endpoints)])
+
+ if access_key:
+ args.extend(['--access-key=' + access_key])
+
+ if secret_key:
+ args.extend(['--secret-key=' + secret_key])
+
+ if default:
+ args.append('--default')
+
+ if master:
+ args.append('--master')
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def modify_zone(module, container_image=None):
+ '''
+    Modify an existing zone
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+ zonegroup = module.params.get('zonegroup')
+ endpoints = module.params.get('endpoints')
+ access_key = module.params.get('access_key')
+ secret_key = module.params.get('secret_key')
+ default = module.params.get('default')
+ master = module.params.get('master')
+
+ args = [
+ 'modify',
+ '--rgw-realm=' + realm,
+ '--rgw-zonegroup=' + zonegroup,
+ '--rgw-zone=' + name
+ ]
+
+ if endpoints:
+ args.extend(['--endpoints=' + ','.join(endpoints)])
+
+ if access_key:
+ args.extend(['--access-key=' + access_key])
+
+ if secret_key:
+ args.extend(['--secret-key=' + secret_key])
+
+ if default:
+ args.append('--default')
+
+ if master:
+ args.append('--master')
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_zone(module, container_image=None):
+ '''
+ Get existing zone
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+ zonegroup = module.params.get('zonegroup')
+
+ args = [
+ 'get',
+ '--rgw-realm=' + realm,
+ '--rgw-zonegroup=' + zonegroup,
+ '--rgw-zone=' + name,
+ '--format=json'
+ ]
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_zonegroup(module, container_image=None):
+ '''
+ Get existing zonegroup
+ '''
+
+ cluster = module.params.get('cluster')
+ realm = module.params.get('realm')
+ zonegroup = module.params.get('zonegroup')
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ args = [
+ '--cluster',
+ cluster,
+ 'zonegroup',
+ 'get',
+ '--rgw-realm=' + realm,
+ '--rgw-zonegroup=' + zonegroup,
+ '--format=json'
+ ]
+
+ cmd.extend(args)
+
+ return cmd
+
+
+def get_realm(module, container_image=None):
+ '''
+ Get existing realm
+ '''
+
+ cluster = module.params.get('cluster')
+ realm = module.params.get('realm')
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ args = [
+ '--cluster',
+ cluster,
+ 'realm',
+ 'get',
+ '--rgw-realm=' + realm,
+ '--format=json'
+ ]
+
+ cmd.extend(args)
+
+ return cmd
+
+
+def remove_zone(module, container_image=None):
+ '''
+ Remove a zone
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+ zonegroup = module.params.get('zonegroup')
+
+ args = [
+ 'delete',
+ '--rgw-realm=' + realm,
+ '--rgw-zonegroup=' + zonegroup,
+ '--rgw-zone=' + name
+ ]
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False):
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=changed,
+ )
+ module.exit_json(**result)
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501
+        realm=dict(type='str', required=True),
+        zonegroup=dict(type='str', required=True),
+        endpoints=dict(type='list', required=False, default=[]),
+ access_key=dict(type='str', required=False, no_log=True),
+ secret_key=dict(type='str', required=False, no_log=True),
+ default=dict(type='bool', required=False, default=False),
+ master=dict(type='bool', required=False, default=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ state = module.params.get('state')
+ endpoints = module.params.get('endpoints')
+ access_key = module.params.get('access_key')
+ secret_key = module.params.get('secret_key')
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_commands(module, get_zone(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
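+            # The zone already exists: fetch its realm and zonegroup so the
+            # current endpoints, system keys and realm_id can be compared
+            # with the requested values before deciding to modify.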
+ zone = json.loads(out)
+ _rc, _cmd, _out, _err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501
+ if _rc != 0:
+ fatal(_err, module)
+ realm = json.loads(_out)
+ _rc, _cmd, _out, _err = exec_commands(module, get_zonegroup(module, container_image=container_image)) # noqa: E501
+ if _rc != 0:
+ fatal(_err, module)
+ zonegroup = json.loads(_out)
+ if not access_key:
+ access_key = ''
+ if not secret_key:
+ secret_key = ''
+ current = {
+ 'endpoints': next(zone['endpoints'] for zone in zonegroup['zones'] if zone['name'] == name), # noqa: E501
+ 'access_key': zone['system_key']['access_key'],
+ 'secret_key': zone['system_key']['secret_key'],
+ 'realm_id': zone['realm_id']
+ }
+ asked = {
+ 'endpoints': endpoints,
+ 'access_key': access_key,
+ 'secret_key': secret_key,
+ 'realm_id': realm['id']
+ }
+ if current != asked:
+ rc, cmd, out, err = exec_commands(module, modify_zone(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc, cmd, out, err = exec_commands(module, create_zone(module, container_image=container_image)) # noqa: E501
+ changed = True
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_commands(module, get_zone(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_commands(module, remove_zone(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc = 0
+ out = "Zone {} doesn't exist".format(name)
+
+ elif state == "info":
+ rc, cmd, out, err = exec_commands(module, get_zone(module, container_image=container_image)) # noqa: E501
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2020, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from ansible.module_utils.ca_common import fatal
+except ImportError:
+ from module_utils.ca_common import fatal
+import datetime
+import json
+import os
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: radosgw_zonegroup
+
+short_description: Manage RADOS Gateway Zonegroup
+
+version_added: "2.8"
+
+description:
+ - Manage RADOS Gateway zonegroup(s) creation, deletion and updates.
+options:
+ cluster:
+ description:
+ - The ceph cluster name.
+ required: false
+ default: ceph
+ name:
+ description:
+ - name of the RADOS Gateway zonegroup.
+ required: true
+ state:
+ description:
+ If 'present' is used, the module creates a zonegroup if it doesn't
+        exist or updates it if it already exists.
+ If 'absent' is used, the module will simply delete the zonegroup.
+ If 'info' is used, the module will return all details about the
+ existing zonegroup (json formatted).
+ required: false
+ choices: ['present', 'absent', 'info']
+ default: present
+ realm:
+ description:
+ - name of the RADOS Gateway realm.
+ required: true
+ endpoints:
+ description:
+ - endpoints of the RADOS Gateway zonegroup.
+ required: false
+ default: []
+ default:
+ description:
+ - set the default flag on the zonegroup.
+ required: false
+ default: false
+ master:
+ description:
+ - set the master flag on the zonegroup.
+ required: false
+ default: false
+
+author:
+ - Dimitri Savineau <dsavinea@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: create a RADOS Gateway default zonegroup
+ radosgw_zonegroup:
+ name: foo
+ realm: bar
+ endpoints:
+ - http://192.168.1.10:8080
+ - http://192.168.1.11:8080
+ default: true
+
+- name: get a RADOS Gateway zonegroup information
+ radosgw_zonegroup:
+ name: foo
+ realm: bar
+ state: info
+
+- name: delete a RADOS Gateway zonegroup
+ radosgw_zonegroup:
+ name: foo
+ realm: bar
+ state: absent
+'''
+
+RETURN = '''# '''
+
+
+def container_exec(binary, container_image):
+ '''
+ Build the docker CLI to run a command inside a container
+ '''
+
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ command_exec = [container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + binary, container_image]
+ return command_exec
+
+
+def is_containerized():
+ '''
+ Check if we are running on a containerized cluster
+ '''
+
+ if 'CEPH_CONTAINER_IMAGE' in os.environ:
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ else:
+ container_image = None
+
+ return container_image
+
+
+def pre_generate_radosgw_cmd(container_image=None):
+ '''
+    Generate radosgw-admin prefix command
+ '''
+ if container_image:
+ cmd = container_exec('radosgw-admin', container_image)
+ else:
+ cmd = ['radosgw-admin']
+
+ return cmd
+
+
+def generate_radosgw_cmd(cluster, args, container_image=None):
+ '''
+    Generate 'radosgw-admin' command line to execute
+ '''
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ base_cmd = [
+ '--cluster',
+ cluster,
+ 'zonegroup'
+ ]
+
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def exec_commands(module, cmd):
+ '''
+ Execute command(s)
+ '''
+
+ rc, out, err = module.run_command(cmd)
+
+ return rc, cmd, out, err
+
+
+def create_zonegroup(module, container_image=None):
+ '''
+ Create a new zonegroup
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+ endpoints = module.params.get('endpoints')
+ default = module.params.get('default')
+ master = module.params.get('master')
+
+ args = ['create', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name]
+
+ if endpoints:
+ args.extend(['--endpoints=' + ','.join(endpoints)])
+
+ if default:
+ args.append('--default')
+
+ if master:
+ args.append('--master')
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def modify_zonegroup(module, container_image=None):
+ '''
+    Modify an existing zonegroup
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+ endpoints = module.params.get('endpoints')
+ default = module.params.get('default')
+ master = module.params.get('master')
+
+ args = ['modify', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name]
+
+ if endpoints:
+ args.extend(['--endpoints=' + ','.join(endpoints)])
+
+ if default:
+ args.append('--default')
+
+ if master:
+ args.append('--master')
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_zonegroup(module, container_image=None):
+ '''
+ Get existing zonegroup
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+
+ args = [
+ 'get',
+ '--rgw-realm=' + realm,
+ '--rgw-zonegroup=' + name,
+ '--format=json'
+ ]
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def get_realm(module, container_image=None):
+ '''
+ Get existing realm
+ '''
+
+ cluster = module.params.get('cluster')
+ realm = module.params.get('realm')
+
+ cmd = pre_generate_radosgw_cmd(container_image=container_image)
+
+ args = [
+ '--cluster',
+ cluster,
+ 'realm',
+ 'get',
+ '--rgw-realm=' + realm,
+ '--format=json'
+ ]
+
+ cmd.extend(args)
+
+ return cmd
+
+
+def remove_zonegroup(module, container_image=None):
+ '''
+ Remove a zonegroup
+ '''
+
+ cluster = module.params.get('cluster')
+ name = module.params.get('name')
+ realm = module.params.get('realm')
+
+ args = ['delete', '--rgw-realm=' + realm, '--rgw-zonegroup=' + name]
+
+ cmd = generate_radosgw_cmd(cluster=cluster,
+ args=args,
+ container_image=container_image)
+
+ return cmd
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False):
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=changed,
+ )
+ module.exit_json(**result)
+
+
+def run_module():
+ module_args = dict(
+ cluster=dict(type='str', required=False, default='ceph'),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, choices=['present', 'absent', 'info'], default='present'), # noqa: E501
+        realm=dict(type='str', required=True),
+        endpoints=dict(type='list', required=False, default=[]),
+ default=dict(type='bool', required=False, default=False),
+ master=dict(type='bool', required=False, default=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ # Gather module parameters in variables
+ name = module.params.get('name')
+ state = module.params.get('state')
+ endpoints = module.params.get('endpoints')
+ master = str(module.params.get('master')).lower()
+
+ if module.check_mode:
+ module.exit_json(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ start='',
+ end='',
+ delta='',
+ )
+
+ startd = datetime.datetime.now()
+ changed = False
+
+ # will return either the image name or None
+ container_image = is_containerized()
+
+ if state == "present":
+ rc, cmd, out, err = exec_commands(module, get_zonegroup(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
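+            # The zonegroup already exists: compare its endpoints, master
+            # flag and realm_id with the requested values before modifying.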
+ zonegroup = json.loads(out)
+ _rc, _cmd, _out, _err = exec_commands(module, get_realm(module, container_image=container_image)) # noqa: E501
+ if _rc != 0:
+ fatal(_err, module)
+ realm = json.loads(_out)
+ current = {
+ 'endpoints': zonegroup['endpoints'],
+ 'master': zonegroup.get('is_master', 'false'),
+ 'realm_id': zonegroup['realm_id']
+ }
+ asked = {
+ 'endpoints': endpoints,
+ 'master': master,
+ 'realm_id': realm['id']
+ }
+ if current != asked:
+ rc, cmd, out, err = exec_commands(module, modify_zonegroup(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc, cmd, out, err = exec_commands(module, create_zonegroup(module, container_image=container_image)) # noqa: E501
+ changed = True
+
+ elif state == "absent":
+ rc, cmd, out, err = exec_commands(module, get_zonegroup(module, container_image=container_image)) # noqa: E501
+ if rc == 0:
+ rc, cmd, out, err = exec_commands(module, remove_zonegroup(module, container_image=container_image)) # noqa: E501
+ changed = True
+ else:
+ rc = 0
+ out = "Zonegroup {} doesn't exist".format(name)
+
+ elif state == "info":
+ rc, cmd, out, err = exec_commands(module, get_zonegroup(module, container_image=container_image)) # noqa: E501
+
+ exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa: E501
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+import os
+import datetime
+
+
+def generate_ceph_cmd(sub_cmd, args, user_key=None, cluster='ceph', user='client.admin', container_image=None, interactive=False):
+ '''
+ Generate 'ceph' command line to execute
+ '''
+
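+    # Fall back to the conventional keyring path for this cluster and user
+    # when no explicit keyring is provided.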
+ if not user_key:
+ user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user)
+
+ cmd = pre_generate_ceph_cmd(container_image=container_image, interactive=interactive)
+
+ base_cmd = [
+ '-n',
+ user,
+ '-k',
+ user_key,
+ '--cluster',
+ cluster
+ ]
+ base_cmd.extend(sub_cmd)
+ cmd.extend(base_cmd + args)
+
+ return cmd
+
+
+def container_exec(binary, container_image, interactive=False):
+ '''
+ Build the docker CLI to run a command inside a container
+ '''
+
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+ command_exec = [container_binary, 'run']
+
+ if interactive:
+ command_exec.extend(['--interactive'])
+
+ command_exec.extend(['--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + binary, container_image])
+ return command_exec
+
+
+def is_containerized():
+ '''
+ Check if we are running on a containerized cluster
+ '''
+
+ if 'CEPH_CONTAINER_IMAGE' in os.environ:
+ container_image = os.getenv('CEPH_CONTAINER_IMAGE')
+ else:
+ container_image = None
+
+ return container_image
+
+
+def pre_generate_ceph_cmd(container_image=None, interactive=False):
+ '''
+    Generate ceph prefix command
+ '''
+ if container_image:
+ cmd = container_exec('ceph', container_image, interactive=interactive)
+ else:
+ cmd = ['ceph']
+
+ return cmd
+
+
+def exec_command(module, cmd, stdin=None):
+ '''
+ Execute command(s)
+ '''
+
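+    # When data is passed on stdin, hand it to run_command as binary so the
+    # payload reaches the command unmodified.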
+ binary_data = False
+ if stdin:
+ binary_data = True
+ rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data)
+
+ return rc, cmd, out, err
+
+
+def exit_module(module, out, rc, cmd, err, startd, changed=False, diff=dict(before="", after="")):
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ result = dict(
+ cmd=cmd,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ rc=rc,
+ stdout=out.rstrip("\r\n"),
+ stderr=err.rstrip("\r\n"),
+ changed=changed,
+ diff=diff
+ )
+ module.exit_json(**result)
+
+
+def fatal(message, module):
+ '''
+ Report a fatal error and exit
+ '''
+
+ if module:
+ module.fail_json(msg=message, rc=1)
+ else:
+ raise Exception(message)
--- /dev/null
+# (c) 2015, Kevin Carter <kevin.carter@rackspace.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+import datetime
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+import base64
+import json
+import os
+import pwd
+import re
+import six
+import time
+import yaml
+import tempfile as tmpfilelib
+
+from ansible.plugins.action import ActionBase
+from ansible.module_utils._text import to_bytes, to_text
+from ansible import constants as C
+from ansible import errors
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from distutils.version import LooseVersion
+from ansible import __version__ as __ansible_version__
+
+__metaclass__ = type
+
+
+CONFIG_TYPES = {
+ 'ini': 'return_config_overrides_ini',
+ 'json': 'return_config_overrides_json',
+ 'yaml': 'return_config_overrides_yaml'
+}
+
+
+class IDumper(AnsibleDumper):
+ def increase_indent(self, flow=False, indentless=False):
+ return super(IDumper, self).increase_indent(flow, False)
+
+
+class MultiKeyDict(dict):
+ """Dictionary class which supports duplicate keys.
+    This class behaves like a standard python dictionary; however, if a key
+    is assigned more than once, the values are collected into a python tuple
+    and every value in that tuple is coerced to a string.
+ Example Usage:
+ >>> z = MultiKeyDict()
+ >>> z['a'] = 1
+ >>> z['b'] = ['a', 'b', 'c']
+ >>> z['c'] = {'a': 1}
+ >>> print(z)
+ ... {'a': 1, 'b': ['a', 'b', 'c'], 'c': {'a': 1}}
+ >>> z['a'] = 2
+ >>> print(z)
+ ... {'a': tuple(['1', '2']), 'c': {'a': 1}, 'b': ['a', 'b', 'c']}
+ """
+
+ def __setitem__(self, key, value):
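+        # The first assignment stores the value as-is; assigning a different
+        # value to an existing key collects all values into a tuple of
+        # strings.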
+ if key in self:
+ if isinstance(self[key], tuple):
+ items = self[key]
+ if str(value) not in items:
+ items += tuple([str(value)])
+ super(MultiKeyDict, self).__setitem__(key, items)
+ else:
+ if str(self[key]) != str(value):
+ items = tuple([str(self[key]), str(value)])
+ super(MultiKeyDict, self).__setitem__(key, items)
+ else:
+ return dict.__setitem__(self, key, value)
+
+
+class ConfigTemplateParser(ConfigParser.RawConfigParser):
+ """ConfigParser which supports multi key value.
+    Keys that hold multiple values (tuples or sets) are written out as
+    repeated 'key = value' entries within the configuration file.
+ Default Configuration file:
+ [DEFAULT]
+ things =
+ url1
+ url2
+ url3
+ other = 1,2,3
+ [section1]
+ key = var1
+ key = var2
+ key = var3
+ Example Usage:
+ >>> cp = ConfigTemplateParser(dict_type=MultiKeyDict)
+ >>> cp.read('/tmp/test.ini')
+ ... ['/tmp/test.ini']
+ >>> cp.get('DEFAULT', 'things')
+ ... \nurl1\nurl2\nurl3
+ >>> cp.get('DEFAULT', 'other')
+ ... '1,2,3'
+ >>> cp.set('DEFAULT', 'key1', 'var1')
+ >>> cp.get('DEFAULT', 'key1')
+ ... 'var1'
+ >>> cp.get('section1', 'key')
+ ... {'var1', 'var2', 'var3'}
+ >>> cp.set('section1', 'key', 'var4')
+ >>> cp.get('section1', 'key')
+ ... {'var1', 'var2', 'var3', 'var4'}
+ >>> with open('/tmp/test2.ini', 'w') as f:
+ ... cp.write(f)
+ Output file:
+ [DEFAULT]
+ things =
+ url1
+ url2
+ url3
+ key1 = var1
+ other = 1,2,3
+ [section1]
+ key = var4
+ key = var1
+ key = var3
+ key = var2
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._comments = {}
+ self.ignore_none_type = bool(kwargs.pop('ignore_none_type', True))
+ self.default_section = str(kwargs.pop('default_section', 'DEFAULT'))
+ ConfigParser.RawConfigParser.__init__(self, *args, **kwargs)
+
+ def _write(self, fp, section, key, item, entry):
+ if section:
+ # If we are not ignoring a none type value, then print out
+ # the option name only if the value type is None.
+ if not self.ignore_none_type and item is None:
+ fp.write(key + '\n')
+ elif (item is not None) or (self._optcre == self.OPTCRE):
+ fp.write(entry)
+ else:
+ fp.write(entry)
+
+ def _write_check(self, fp, key, value, section=False):
+ if isinstance(value, (tuple, set)):
+ for item in value:
+ item = str(item).replace('\n', '\n\t')
+ entry = "%s = %s\n" % (key, item)
+ self._write(fp, section, key, item, entry)
+ else:
+ if isinstance(value, list):
+ _value = [str(i.replace('\n', '\n\t')) for i in value]
+ entry = '%s = %s\n' % (key, ','.join(_value))
+ else:
+ entry = '%s = %s\n' % (key, str(value).replace('\n', '\n\t'))
+ self._write(fp, section, key, value, entry)
+
+ def write(self, fp):
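+        # Helper closures: _do_write emits one section (the for/else writes
+        # the trailing blank line once all options are written) and
+        # _write_comments re-emits any comments captured by _read().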
+ def _do_write(section_name, section, section_bool=False):
+ _write_comments(section_name)
+ fp.write("[%s]\n" % section_name)
+ for key, value in sorted(section.items()):
+ _write_comments(section_name, optname=key)
+ self._write_check(fp, key=key, value=value,
+ section=section_bool)
+ else:
+ fp.write("\n")
+
+ def _write_comments(section, optname=None):
+ comsect = self._comments.get(section, {})
+ if optname in comsect:
+ fp.write(''.join(comsect[optname]))
+
+ if self.default_section != 'DEFAULT' and self._sections.get(
+ self.default_section, False):
+ _do_write(self.default_section,
+ self._sections[self.default_section],
+ section_bool=True)
+ self._sections.pop(self.default_section)
+
+ if self._defaults:
+ _do_write('DEFAULT', self._defaults)
+
+ for section in sorted(self._sections):
+ _do_write(section, self._sections[section], section_bool=True)
+
+ def _read(self, fp, fpname):
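+        # Re-implementation of RawConfigParser._read that keeps duplicate
+        # keys via MultiKeyDict, records comments per section/option so
+        # write() can re-emit them, and joins continuation lines back into
+        # multi-line values.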
+ comments = []
+ cursect = None
+ optname = None
+ lineno = 0
+ e = None
+ while True:
+ line = fp.readline()
+ if not line:
+ break
+ lineno += 1
+ if line.strip() == '':
+ if comments:
+ comments.append('')
+ continue
+
+ if line.lstrip()[0] in '#;':
+ comments.append(line.lstrip())
+ continue
+
+ if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
+ continue
+ if line[0].isspace() and cursect is not None and optname:
+ value = line.strip()
+ if value:
+ try:
+ if isinstance(cursect[optname], (tuple, set)):
+ _temp_item = list(cursect[optname])
+ del cursect[optname]
+ cursect[optname] = _temp_item
+ elif isinstance(cursect[optname], six.text_type):
+ _temp_item = [cursect[optname]]
+ del cursect[optname]
+ cursect[optname] = _temp_item
+ except NameError:
+ if isinstance(cursect[optname], (bytes, str)):
+ _temp_item = [cursect[optname]]
+ del cursect[optname]
+ cursect[optname] = _temp_item
+ cursect[optname].append(value)
+ else:
+ mo = self.SECTCRE.match(line)
+ if mo:
+ sectname = mo.group('header')
+ if sectname in self._sections:
+ cursect = self._sections[sectname]
+ elif sectname == 'DEFAULT':
+ cursect = self._defaults
+ else:
+ cursect = self._dict()
+ self._sections[sectname] = cursect
+ optname = None
+
+ comsect = self._comments.setdefault(sectname, {})
+ if comments:
+ # NOTE(flaper87): Using none as the key for
+ # section level comments
+ comsect[None] = comments
+ comments = []
+ elif cursect is None:
+ raise ConfigParser.MissingSectionHeaderError(
+ fpname,
+ lineno,
+ line
+ )
+ else:
+ mo = self._optcre.match(line)
+ if mo:
+ optname, vi, optval = mo.group('option', 'vi', 'value')
+ optname = self.optionxform(optname.rstrip())
+ if optval is not None:
+ if vi in ('=', ':') and ';' in optval:
+ pos = optval.find(';')
+ if pos != -1 and optval[pos - 1].isspace():
+ optval = optval[:pos]
+ optval = optval.strip()
+ if optval == '""':
+ optval = ''
+ cursect[optname] = optval
+ if comments:
+ comsect[optname] = comments
+ comments = []
+ else:
+ if not e:
+ e = ConfigParser.ParsingError(fpname)
+ e.append(lineno, repr(line))
+ if e:
+ raise e
+ all_sections = [self._defaults]
+ all_sections.extend(self._sections.values())
+ for options in all_sections:
+ for name, val in options.items():
+ if isinstance(val, list):
+ _temp_item = '\n'.join(val)
+ del options[name]
+ options[name] = _temp_item
+
+
+class DictCompare(object):
+ """
+ Calculate the difference between two dictionaries.
+
+ Example Usage:
+ >>> base_dict = {'test1': 'val1', 'test2': 'val2', 'test3': 'val3'}
+ >>> new_dict = {'test1': 'val2', 'test3': 'val3', 'test4': 'val3'}
+ >>> dc = DictCompare(base_dict, new_dict)
+ >>> dc.added()
+ ... ['test4']
+ >>> dc.removed()
+ ... ['test2']
+ >>> dc.changed()
+ ... ['test1']
+ >>> dc.get_changes()
+ ... {'added':
+ ... {'test4': 'val3'},
+ ... 'removed':
+ ... {'test2': 'val2'},
+ ... 'changed':
+    ...     {'test1': {'current_val': 'val1', 'new_val': 'val2'}
+ ... }
+ """
+ def __init__(self, base_dict, new_dict):
+ self.new_dict, self.base_dict = new_dict, base_dict
+ self.base_items, self.new_items = set(
+ self.base_dict.keys()), set(self.new_dict.keys())
+ self.intersect = self.new_items.intersection(self.base_items)
+
+ def added(self):
+ return self.new_items - self.intersect
+
+ def removed(self):
+ return self.base_items - self.intersect
+
+ def changed(self):
+ return set(
+ x for x in self.intersect if self.base_dict[x] != self.new_dict[x])
+
+ def get_changes(self):
+ """Returns dict of differences between 2 dicts and bool indicating if
+        there are differences.
+
+ :returns: ``dict``, ``bool``
+ """
+ changed = False
+ mods = {'added': {}, 'removed': {}, 'changed': {}}
+
+ for s in self.changed():
+ changed = True
+ if type(self.base_dict[s]) is not dict:
+ mods['changed'] = {
+ s: {'current_val': self.base_dict[s],
+ 'new_val': self.new_dict[s]}}
+ continue
+
+ diff = DictCompare(self.base_dict[s], self.new_dict[s])
+ for a in diff.added():
+ if s not in mods['added']:
+ mods['added'][s] = {a: self.new_dict[s][a]}
+ else:
+ mods['added'][s][a] = self.new_dict[s][a]
+
+ for r in diff.removed():
+ if s not in mods['removed']:
+ mods['removed'][s] = {r: self.base_dict[s][r]}
+ else:
+ mods['removed'][s][r] = self.base_dict[s][r]
+
+ for c in diff.changed():
+ if s not in mods['changed']:
+ mods['changed'][s] = {
+ c: {'current_val': self.base_dict[s][c],
+ 'new_val': self.new_dict[s][c]}}
+ else:
+ mods['changed'][s][c] = {
+ 'current_val': self.base_dict[s][c],
+ 'new_val': self.new_dict[s][c]}
+
+ for s in self.added():
+ changed = True
+ mods['added'][s] = self.new_dict[s]
+
+ for s in self.removed():
+ changed = True
+ mods['removed'][s] = self.base_dict[s]
+
+ return mods, changed
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = True
+
+ def return_config_overrides_ini(self,
+ config_overrides,
+ resultant,
+ list_extend=True,
+ ignore_none_type=True,
+ default_section='DEFAULT'):
+ """Returns string value from a modified config file and dict of
+ merged config
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``, ``dict``
+ """
+        # If there is an exception loading the RawConfigParser, the config
+        # object is loaded again without the extra options. This is done to
+        # support older python versions.
+ try:
+ config = ConfigTemplateParser(
+ allow_no_value=True,
+ dict_type=MultiKeyDict,
+ ignore_none_type=ignore_none_type,
+ default_section=default_section
+ )
+ config.optionxform = str
+ except Exception:
+ config = ConfigTemplateParser(dict_type=MultiKeyDict)
+
+ config_object = StringIO(resultant)
+ config.readfp(config_object)
+
+ for section, items in config_overrides.items():
+ # If the items value is not a dictionary it is assumed that the
+ # value is a default item for this config type.
+ if not isinstance(items, dict):
+ if isinstance(items, list):
+ items = ','.join(to_text(i) for i in items)
+ self._option_write(
+ config,
+ 'DEFAULT',
+ section,
+ items
+ )
+ else:
+ # Attempt to add a section to the config file passing if
+ # an error is raised that is related to the section
+ # already existing.
+ try:
+ config.add_section(section)
+ except (ConfigParser.DuplicateSectionError, ValueError):
+ pass
+ for key, value in items.items():
+ try:
+ self._option_write(config, section, key, value)
+ except ConfigParser.NoSectionError as exp:
+ error_msg = str(exp)
+ error_msg += (
+ ' Try being more explicit with your override'
+                            ' data. Sections are case sensitive.'
+ )
+ raise errors.AnsibleModuleError(error_msg)
+ else:
+ config_object.close()
+
+ config_dict_new = {}
+ config_defaults = config.defaults()
+ for s in config.sections():
+ config_dict_new[s] = {}
+ for k, v in config.items(s):
+ if k not in config_defaults or config_defaults[k] != v:
+ config_dict_new[s][k] = v
+ else:
+ if default_section in config_dict_new:
+ config_dict_new[default_section][k] = v
+ else:
+ config_dict_new[default_section] = {k: v}
+
+ resultant_stringio = StringIO()
+ try:
+ config.write(resultant_stringio)
+ return resultant_stringio.getvalue(), config_dict_new
+ finally:
+ resultant_stringio.close()
+
+ @staticmethod
+ def _option_write(config, section, key, value):
+ config.remove_option(str(section), str(key))
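+        # A dict value whose entries are all falsy is reduced to a tuple of
+        # its keys, so each key is written out as one of the option's values;
+        # anything without .values() raises AttributeError and falls through.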
+ try:
+ if not any(list(value.values())):
+ value = tuple(value.keys())
+ except AttributeError:
+ pass
+ if isinstance(value, (tuple, set)):
+ config.set(str(section), str(key), value)
+ elif isinstance(value, set):
+ config.set(str(section), str(key), value)
+ elif isinstance(value, list):
+ config.set(str(section), str(key), ','.join(str(i) for i in value))
+ else:
+ config.set(str(section), str(key), str(value))
+
+ def return_config_overrides_json(self,
+ config_overrides,
+ resultant,
+ list_extend=True,
+ ignore_none_type=True,
+ default_section='DEFAULT'):
+ """Returns config json and dict of merged config
+
+        It's important to note that file ordering will not be preserved as the
+ information within the json file will be sorted by keys.
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``, ``dict``
+ """
+ original_resultant = json.loads(resultant)
+ merged_resultant = self._merge_dict(
+ base_items=original_resultant,
+ new_items=config_overrides,
+ list_extend=list_extend
+ )
+ return json.dumps(
+ merged_resultant,
+ indent=4,
+ sort_keys=True
+ ), merged_resultant
+
+ def return_config_overrides_yaml(self,
+ config_overrides,
+ resultant,
+ list_extend=True,
+ ignore_none_type=True,
+ default_section='DEFAULT'):
+ """Return config yaml and dict of merged config
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``, ``dict``
+ """
+ original_resultant = yaml.safe_load(resultant)
+ merged_resultant = self._merge_dict(
+ base_items=original_resultant,
+ new_items=config_overrides,
+ list_extend=list_extend
+ )
+ return yaml.dump(
+ merged_resultant,
+ Dumper=IDumper,
+ default_flow_style=False,
+ width=1000,
+ ), merged_resultant
+
+ def _merge_dict(self, base_items, new_items, list_extend=True):
+ """Recursively merge new_items into base_items.
+
+ :param base_items: ``dict``
+ :param new_items: ``dict``
+ :returns: ``dict``
+ """
+ for key, value in new_items.items():
+ if isinstance(value, dict):
+ base_items[key] = self._merge_dict(
+ base_items=base_items.get(key, {}),
+ new_items=value,
+ list_extend=list_extend
+ )
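+            # String values containing commas or newlines are split into
+            # lists so they merge like native lists.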
+ elif (not isinstance(value, int) and
+ (',' in value or '\n' in value)):
+ base_items[key] = re.split(',|\n', value)
+ base_items[key] = [i.strip() for i in base_items[key] if i]
+ elif isinstance(value, list):
+ if isinstance(base_items.get(key), list) and list_extend:
+ base_items[key].extend(value)
+ else:
+ base_items[key] = value
+ elif isinstance(value, (tuple, set)):
+ if isinstance(base_items.get(key), tuple) and list_extend:
+ base_items[key] += tuple(value)
+ elif isinstance(base_items.get(key), list) and list_extend:
+ base_items[key].extend(list(value))
+ else:
+ base_items[key] = value
+ else:
+ base_items[key] = new_items[key]
+ return base_items
+
+ def _load_options_and_status(self, task_vars):
+ """Return options and status from module load."""
+
+ config_type = self._task.args.get('config_type')
+ if config_type not in ['ini', 'yaml', 'json']:
+ return False, dict(
+ failed=True,
+ msg="No valid [ config_type ] was provided. Valid options are"
+ " ini, yaml, or json."
+ )
+
+ # Access to protected method is unavoidable in Ansible
+ searchpath = [self._loader._basedir]
+
+ if self._task._role:
+ file_path = self._task._role._role_path
+ searchpath.insert(1, C.DEFAULT_ROLES_PATH)
+ searchpath.insert(1, self._task._role._role_path)
+ else:
+ file_path = self._loader.get_basedir()
+
+ user_source = self._task.args.get('src')
+ # (alextricity25) It's possible that the user could pass in a datatype
+ # and not always a string. In this case we don't want the datatype
+ # python representation to be printed out to the file, but rather we
+ # want the serialized version.
+ _user_content = self._task.args.get('content')
+
+        # If the content input is a dictionary, it is dumped as json when
+        # config_type is 'json'.
+ if isinstance(_user_content, dict):
+ if self._task.args.get('config_type') == 'json':
+ _user_content = json.dumps(_user_content)
+
+ user_content = str(_user_content)
+ if not user_source:
+ if not user_content:
+ return False, dict(
+ failed=True,
+ msg="No user [ src ] or [ content ] was provided"
+ )
+ else:
+ tmp_content = None
+ fd, tmp_content = tmpfilelib.mkstemp()
+ try:
+ with open(tmp_content, 'wb') as f:
+ f.write(user_content.encode())
+ except Exception as err:
+ os.remove(tmp_content)
+ raise Exception(err)
+ self._task.args['src'] = source = tmp_content
+ else:
+ source = self._loader.path_dwim_relative(
+ file_path,
+ 'templates',
+ user_source
+ )
+ searchpath.insert(1, os.path.dirname(source))
+
+ _dest = self._task.args.get('dest')
+ list_extend = self._task.args.get('list_extend')
+ if not _dest:
+ return False, dict(
+ failed=True,
+ msg="No [ dest ] was provided"
+ )
+ else:
+ # Expand any user home dir specification
+ user_dest = self._remote_expand_user(_dest)
+ if user_dest.endswith(os.sep):
+ user_dest = os.path.join(user_dest, os.path.basename(source))
+
+ # Get ignore_none_type
+        # In some situations (e.g. my.cnf files), INI files can have valueless
+        # options that don't have a '=' or ':' suffix. In these cases,
+        # ConfigParser gives these options a "None" value. If ignore_none_type
+        # is set to true, these key/value options will be ignored; if it's set
+        # to false, then ConfigTemplateParser will write out only the option
+        # name without the '=' or ':' suffix. The default is true.
+ ignore_none_type = self._task.args.get('ignore_none_type', True)
+
+ default_section = self._task.args.get('default_section', 'DEFAULT')
+
+ return True, dict(
+ source=source,
+ dest=user_dest,
+ config_overrides=self._task.args.get('config_overrides', dict()),
+ config_type=config_type,
+ searchpath=searchpath,
+ list_extend=list_extend,
+ ignore_none_type=ignore_none_type,
+ default_section=default_section
+ )
+
+ def run(self, tmp=None, task_vars=None):
+ """Run the method"""
+
+ try:
+ remote_user = task_vars.get('ansible_user')
+ if not remote_user:
+ remote_user = task_vars.get('ansible_ssh_user')
+ if not remote_user:
+ remote_user = self._play_context.remote_user
+
+ if not tmp:
+ tmp = self._make_tmp_path(remote_user)
+ except TypeError:
+ if not tmp:
+ tmp = self._make_tmp_path()
+
+ _status, _vars = self._load_options_and_status(task_vars=task_vars)
+ if not _status:
+ return _vars
+
+ temp_vars = task_vars.copy()
+ template_host = temp_vars['template_host'] = os.uname()[1]
+ source = temp_vars['template_path'] = _vars['source']
+ temp_vars['template_mtime'] = datetime.datetime.fromtimestamp(
+ os.path.getmtime(source)
+ )
+
+ try:
+ template_uid = temp_vars['template_uid'] = pwd.getpwuid(
+ os.stat(source).st_uid
+ ).pw_name
+ except Exception:
+ template_uid = temp_vars['template_uid'] = os.stat(source).st_uid
+
+ managed_default = C.DEFAULT_MANAGED_STR
+ managed_str = managed_default.format(
+ host=template_host,
+ uid=template_uid,
+ file=to_bytes(source)
+ )
+
+ temp_vars['ansible_managed'] = time.strftime(
+ managed_str,
+ time.localtime(os.path.getmtime(source))
+ )
+ temp_vars['template_fullpath'] = os.path.abspath(source)
+ temp_vars['template_run_date'] = datetime.datetime.now()
+
+ with open(source, 'r') as f:
+ template_data = to_text(f.read())
+
+ self._templar.environment.loader.searchpath = _vars['searchpath']
+ self._templar.set_available_variables(temp_vars)
+ resultant = self._templar.template(
+ template_data,
+ preserve_trailing_newlines=True,
+ escape_backslashes=False,
+ convert_data=False
+ )
+
+ # Access to protected method is unavoidable in Ansible
+ self._templar.set_available_variables(
+ self._templar._available_variables
+ )
+
+ config_dict_base = {}
+ type_merger = getattr(self, CONFIG_TYPES.get(_vars['config_type']))
+ resultant, config_dict_base = type_merger(
+ config_overrides=_vars['config_overrides'],
+ resultant=resultant,
+ list_extend=_vars.get('list_extend', True),
+ ignore_none_type=_vars.get('ignore_none_type', True),
+ default_section=_vars.get('default_section', 'DEFAULT')
+ )
+
+ changed = False
+ if self._play_context.diff:
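+            # In diff mode, slurp the current destination file, run it
+            # through the same merger with empty overrides and compare the
+            # resulting dicts to produce the before/after report.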
+ slurpee = self._execute_module(
+ module_name='slurp',
+ module_args=dict(src=_vars['dest']),
+ task_vars=task_vars
+ )
+
+ config_dict_new = {}
+ if 'content' in slurpee:
+ dest_data = base64.b64decode(
+ slurpee['content']).decode('utf-8')
+ resultant_dest = self._templar.template(
+ dest_data,
+ preserve_trailing_newlines=True,
+ escape_backslashes=False,
+ convert_data=False
+ )
+ type_merger = getattr(self,
+ CONFIG_TYPES.get(_vars['config_type']))
+ resultant_new, config_dict_new = type_merger(
+ config_overrides={},
+ resultant=resultant_dest,
+ list_extend=_vars.get('list_extend', True),
+ ignore_none_type=_vars.get('ignore_none_type', True),
+ default_section=_vars.get('default_section', 'DEFAULT')
+ )
+
+ # Compare source+overrides with dest to look for changes and
+ # build diff
+ cmp_dicts = DictCompare(config_dict_new, config_dict_base)
+ mods, changed = cmp_dicts.get_changes()
+
+ # Re-template the resultant object as it may have new data within it
+ # as provided by an override variable.
+ resultant = self._templar.template(
+ resultant,
+ preserve_trailing_newlines=True,
+ escape_backslashes=False,
+ convert_data=False
+ )
+
+ # run the copy module
+ new_module_args = self._task.args.copy()
+ # Access to protected method is unavoidable in Ansible
+ transferred_data = self._transfer_data(
+ self._connection._shell.join_path(tmp, 'source'),
+ resultant
+ )
+ if LooseVersion(__ansible_version__) < LooseVersion("2.6"):
+ new_module_args.update(
+ dict(
+ src=transferred_data,
+ dest=_vars['dest'],
+ original_basename=os.path.basename(source),
+ follow=True,
+ ),
+ )
+ else:
+ new_module_args.update(
+ dict(
+ src=transferred_data,
+ dest=_vars['dest'],
+ _original_basename=os.path.basename(source),
+ follow=True,
+ ),
+ )
+
+ # Remove data types that are not available to the copy module
+ new_module_args.pop('config_overrides', None)
+ new_module_args.pop('config_type', None)
+ new_module_args.pop('list_extend', None)
+ new_module_args.pop('ignore_none_type', None)
+ new_module_args.pop('default_section', None)
+ # Content from config_template is converted to src
+ new_module_args.pop('content', None)
+
+ # Run the copy module
+ rc = self._execute_module(
+ module_name='copy',
+ module_args=new_module_args,
+ task_vars=task_vars
+ )
+ copy_changed = rc.get('changed')
+ if not copy_changed:
+ rc['changed'] = changed
+
+ if self._play_context.diff:
+ rc['diff'] = []
+ rc['diff'].append(
+ {'prepared': json.dumps(mods, indent=4, sort_keys=True)})
+ if self._task.args.get('content'):
+ os.remove(_vars['source'])
+ return rc
--- /dev/null
+"""Ansible callback plugin to print a summary completion status of installation
+phases.
+"""
+from datetime import datetime
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+
+class CallbackModule(CallbackBase):
+ """This callback summarizes installation phase status."""
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'installer_checkpoint'
+ CALLBACK_NEEDS_WHITELIST = False
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ def v2_playbook_on_stats(self, stats):
+
+ # Set the order of the installer phases
+ installer_phases = [
+ 'installer_phase_ceph_mon',
+ 'installer_phase_ceph_mgr',
+ 'installer_phase_ceph_osd',
+ 'installer_phase_ceph_mds',
+ 'installer_phase_ceph_rgw',
+ 'installer_phase_ceph_nfs',
+ 'installer_phase_ceph_rbdmirror',
+ 'installer_phase_ceph_client',
+ 'installer_phase_ceph_iscsi_gw',
+ 'installer_phase_ceph_rgw_loadbalancer',
+ 'installer_phase_ceph_dashboard',
+ 'installer_phase_ceph_grafana',
+ 'installer_phase_ceph_node_exporter',
+ 'installer_phase_ceph_crash',
+ ]
+
+ # Define the attributes of the installer phases
+ phase_attributes = {
+ 'installer_phase_ceph_mon': {
+ 'title': 'Install Ceph Monitor',
+ 'playbook': 'roles/ceph-mon/tasks/main.yml'
+ },
+ 'installer_phase_ceph_mgr': {
+ 'title': 'Install Ceph Manager',
+ 'playbook': 'roles/ceph-mgr/tasks/main.yml'
+ },
+ 'installer_phase_ceph_osd': {
+ 'title': 'Install Ceph OSD',
+ 'playbook': 'roles/ceph-osd/tasks/main.yml'
+ },
+ 'installer_phase_ceph_mds': {
+ 'title': 'Install Ceph MDS',
+ 'playbook': 'roles/ceph-mds/tasks/main.yml'
+ },
+ 'installer_phase_ceph_rgw': {
+ 'title': 'Install Ceph RGW',
+ 'playbook': 'roles/ceph-rgw/tasks/main.yml'
+ },
+ 'installer_phase_ceph_nfs': {
+ 'title': 'Install Ceph NFS',
+ 'playbook': 'roles/ceph-nfs/tasks/main.yml'
+ },
+ 'installer_phase_ceph_rbdmirror': {
+ 'title': 'Install Ceph RBD Mirror',
+ 'playbook': 'roles/ceph-rbd-mirror/tasks/main.yml'
+ },
+ 'installer_phase_ceph_client': {
+ 'title': 'Install Ceph Client',
+ 'playbook': 'roles/ceph-client/tasks/main.yml'
+ },
+ 'installer_phase_ceph_iscsi_gw': {
+ 'title': 'Install Ceph iSCSI Gateway',
+ 'playbook': 'roles/ceph-iscsi-gw/tasks/main.yml'
+ },
+ 'installer_phase_ceph_rgw_loadbalancer': {
+ 'title': 'Install Ceph RGW LoadBalancer',
+ 'playbook': 'roles/ceph-rgw-loadbalancer/tasks/main.yml'
+ },
+ 'installer_phase_ceph_dashboard': {
+ 'title': 'Install Ceph Dashboard',
+ 'playbook': 'roles/ceph-dashboard/tasks/main.yml'
+ },
+ 'installer_phase_ceph_grafana': {
+ 'title': 'Install Ceph Grafana',
+ 'playbook': 'roles/ceph-grafana/tasks/main.yml'
+ },
+ 'installer_phase_ceph_node_exporter': {
+ 'title': 'Install Ceph Node Exporter',
+ 'playbook': 'roles/ceph-node-exporter/tasks/main.yml'
+ },
+ 'installer_phase_ceph_crash': {
+ 'title': 'Install Ceph Crash',
+ 'playbook': 'roles/ceph-crash/tasks/main.yml'
+ },
+ }
+
+ # Find the longest phase title
+ max_column = 0
+ for phase in phase_attributes:
+ max_column = max(max_column, len(phase_attributes[phase]['title']))
+
+ if '_run' in stats.custom:
+ self._display.banner('INSTALLER STATUS')
+ for phase in installer_phases:
+ phase_title = phase_attributes[phase]['title']
+ padding = max_column - len(phase_title) + 2
+ if phase in stats.custom['_run']:
+ phase_status = stats.custom['_run'][phase]['status']
+ phase_time = phase_time_delta(stats.custom['_run'][phase])
+ self._display.display(
+ '{}{}: {} ({})'.format(phase_title, ' ' * padding, phase_status, phase_time),
+ color=self.phase_color(phase_status))
+ if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
+ self._display.display(
+ '\tThis phase can be restarted by running: {}'.format(
+ phase_attributes[phase]['playbook']))
+
+ self._display.display("", screen_only=True)
+
+ def phase_color(self, status):
+ """ Return color code for installer phase"""
+ valid_status = [
+ 'In Progress',
+ 'Complete',
+ ]
+
+ if status not in valid_status:
+ self._display.warning('Invalid phase status defined: {}'.format(status))
+
+ if status == 'Complete':
+ phase_color = C.COLOR_OK
+ elif status == 'In Progress':
+ phase_color = C.COLOR_ERROR
+ else:
+ phase_color = C.COLOR_WARN
+
+ return phase_color
+
+
+def phase_time_delta(phase):
+ """ Calculate the difference between phase start and end times """
+ time_format = '%Y%m%d%H%M%SZ'
+ phase_start = datetime.strptime(phase['start'], time_format)
+ if 'end' not in phase:
+ # The phase failed so set the end time to now
+ phase_end = datetime.now()
+ else:
+ phase_end = datetime.strptime(phase['end'], time_format)
+ delta = str(phase_end - phase_start).split(".")[0] # Trim microseconds
+
+ return delta
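+
+# Example of the summary this callback prints at the end of a run
+# (illustrative timings):
+#
+#   INSTALLER STATUS ***************************************************
+#   Install Ceph Monitor          : Complete (0:02:10)
+#   Install Ceph Manager          : Complete (0:01:05)
+#   Install Ceph OSD              : In Progress (0:00:47)
+#       This phase can be restarted by running: roles/ceph-osd/tasks/main.yml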
--- /dev/null
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import errors
+
+try:
+ import netaddr
+except ImportError:
+ # in this case, we'll make the filter return an error message (see bottom)
+ netaddr = None
+
+
+class FilterModule(object):
+ ''' IP addresses within IP ranges '''
+
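+    # Illustrative usage of this filter from a playbook or template:
+    #   {{ ['192.168.1.12', '10.10.0.4'] | ips_in_ranges(['192.168.1.0/24']) }}
+    # returns ['192.168.1.12']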
+ def ips_in_ranges(self, ip_addresses, ip_ranges):
+ ips_in_ranges = list()
+ for ip_addr in ip_addresses:
+ for ip_range in ip_ranges:
+ if netaddr.IPAddress(ip_addr) in netaddr.IPNetwork(ip_range):
+ ips_in_ranges.append(ip_addr)
+ return ips_in_ranges
+
+ def filters(self):
+ if netaddr:
+ return {
+ 'ips_in_ranges': self.ips_in_ranges
+ }
+ else:
+ # Need to install python's netaddr for these filters to work
+ raise errors.AnsibleFilterError(
+ "The ips_in_ranges filter requires python's netaddr be "
+ "installed on the ansible controller.")
--- /dev/null
+---
+# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLES FOR A PARTICULAR PURPOSE
+# GOAL: CONFIGURE RADOS GATEWAY WITH KEYSTONE V2
+#
+# The following variables should be added in your group_vars/rgws.yml file
+# The double quotes are important, do NOT remove them.
+
+
+ceph_conf_overrides:
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+ "rgw keystone api version": "2"
+ "rgw keystone url": "http://192.168.0.1:35357"
+ "rgw keystone admin token": "password"
+ "rgw keystone admin tenant": "admin"
+ "rgw keystone accepted roles": "Member, _member_, admin"
+ "rgw keystone token cache size": "10000"
+ "rgw keystone revocation interval": "900"
+ "rgw s3 auth use keystone": "true"
+ "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
+
+
+# NOTE (leseb): to authenticate with Keystone you have two options:
+# * using a token (as shown above)
+# - "rgw keystone admin token" = "admin"
+# - "rgw keystone token cache size" = "10000"
+#
+# * using credentials:
+# - "rgw keystone admin user" = "admin"
+# - "rgw keystone admin password" = "password"
+#
--- /dev/null
+---
+# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLES FOR A PARTICULAR PURPOSE
+# GOAL: CONFIGURE RADOS GATEWAY WITH KEYSTONE V3
+#
+# The following variables should be added in your group_vars/rgws.yml file
+# The double quotes are important, do NOT remove them.
+
+
+ceph_conf_overrides:
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+ "rgw keystone api version": "3"
+ "rgw keystone url": "http://192.168.0.1:35357"
+ "rgw keystone admin token": "password"
+ "rgw keystone admin project": "admin"
+ "rgw keystone admin domain": "default"
+ "rgw keystone accepted roles": "Member, _member_, admin"
+ "rgw keystone token cache size": "10000"
+ "rgw keystone revocation interval": "900"
+ "rgw s3 auth use keystone": "true"
+ "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
+
+
+# NOTE (leseb): to authenticate with Keystone you have two options:
+# * using a token (as shown above)
+# - "rgw keystone admin token" = "admin"
+# - "rgw keystone token cache size" = "10000"
+#
+# * using credentials:
+# - "rgw keystone admin user" = "admin"
+# - "rgw keystone admin password" = "password"
+#
--- /dev/null
+---
+# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLES FOR A PARTICULAR PURPOSE
+# GOAL: CONFIGURE RADOS GATEWAY WITH STATIC WEBSITE
+#
+# The following variables should be added in your group_vars/rgws.yml file
+# The double quotes are important, do NOT remove them.
+
+ceph_conf_overrides:
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+    "rgw enable static website": "true"
+    "rgw dns s3website name": "objects-website-region.domain.com"
--- /dev/null
+---
+# THIS FILE IS AN EXAMPLE THAT CONTAINS A SET OF VARIABLES FOR A PARTICULAR PURPOSE
+# GOAL: CONFIGURE RADOS GATEWAY WITH USAGE LOG
+#
+# The following variables should be added in your group_vars/rgws.yml file
+# The double quotes are important, do NOT remove them.
+
+ceph_conf_overrides:
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
+    "rgw enable usage log": "true"
+    "rgw usage log tick interval": "30"
+    "rgw usage log flush threshold": "1024"
+    "rgw usage max shards": "32"
+    "rgw usage max user shards": "1"
+
--- /dev/null
+---
+- name: check for python
+ stat:
+ path: "{{ item }}"
+ changed_when: false
+ failed_when: false
+ register: systempython
+ with_items:
+ - /usr/bin/python
+ - /usr/bin/python3
+ - /usr/libexec/platform-python
+
+- block:
+ - name: check for dnf-3 package manager (RedHat/Fedora/CentOS)
+ raw: stat /bin/dnf-3
+ changed_when: false
+ failed_when: false
+ register: stat_dnf3
+
+ - name: check for yum package manager (RedHat/Fedora/CentOS)
+ raw: stat /bin/yum
+ changed_when: false
+ failed_when: false
+ register: stat_yum
+
+ - name: check for apt package manager (Debian/Ubuntu)
+ raw: stat /usr/bin/apt-get
+ changed_when: false
+ failed_when: false
+ register: stat_apt
+
+ - name: check for zypper package manager (SUSE/OpenSUSE)
+ raw: stat /usr/bin/zypper
+ changed_when: false
+ failed_when: false
+ register: stat_zypper
+
+ - name: install python for RedHat based OS - dnf
+ raw: >
+ {{ 'dnf' if stat_dnf3.rc == 0 else 'yum' }} -y install python3;
+ ln -sf /usr/bin/python3 /usr/bin/python
+ creates=/usr/bin/python
+ register: result
+ until: (result is succeeded) and ('Failed' not in result.stdout)
+ when: stat_dnf3.rc == 0 or stat_yum.rc == 0
+
+ - name: install python for debian based OS
+ raw: apt-get -y install python-simplejson
+ register: result
+ until: result is succeeded
+ when: stat_apt.rc == 0
+
+ - name: install python for SUSE/OpenSUSE
+ raw: zypper -n install python-base
+ register: result
+ until: result is succeeded
+ when: stat_zypper.rc == 0
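+  # the condition below ensures the block above only runs when none of the
+  # python interpreter paths checked at the top of this file exist on the host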
+ when: not True in (systempython.results | selectattr('stat', 'defined') | map(attribute='stat.exists') | list | unique)
+
+- name: install python-xml for opensuse only if python2 is installed already
+ raw: zypper -n install python-xml
+ register: result
+ until: result is succeeded
+ with_items: "{{ systempython.results }}"
+ when:
+ - stat_zypper.rc is defined
+ - stat_zypper.rc == 0
+ - item.stat.exists | bool
+ - item.stat.path == '/usr/bin/python'
--- /dev/null
+# These are Python requirements needed to run ceph-ansible master
+ansible>=2.9,<2.10,!=2.9.10
+netaddr
+six
--- /dev/null
+---
+# These are Ansible requirements needed to run ceph-ansible main
+collections:
+ - name: ansible.utils
+ version: '>=2.5.0'
+ - name: community.general
+ - name: ansible.posix
--- /dev/null
+ceph_repository: rhcs
+ceph_origin: repository
+ceph_iscsi_config_dev: false
+ceph_rhcs_version: 5
+containerized_deployment: true
+ceph_docker_image: "rhceph/rhceph-5-rhel8"
+ceph_docker_image_tag: "latest"
+ceph_docker_registry: "registry.redhat.io"
+ceph_docker_registry_auth: true
+node_exporter_container_image: registry.redhat.io/openshift4/ose-prometheus-node-exporter:v4.6
+grafana_container_image: registry.redhat.io/rhceph/rhceph-5-dashboard-rhel8:5
+prometheus_container_image: registry.redhat.io/openshift4/ose-prometheus:v4.6
+alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alertmanager:v4.6
+# END OF FILE, DO NOT TOUCH ME!
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-client
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+###########
+# GENERAL #
+###########
+
+# Even though Client nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on Client nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+copy_admin_key: false
+
+user_config: false
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# test:
+# name: "test"
+# application: "rbd"
+# target_size_ratio: 0.2
+test:
+ name: "test"
+ application: "rbd"
+test2:
+ name: "test2"
+ application: "rbd"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
+
+# Generate a keyring using ceph-authtool CLI or python.
+# Eg:
+# $ ceph-authtool --gen-print-key
+# or
+# $ python2 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print(base64.b64encode(header + key))"
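+# or with python3 (same one-liner as above, decoding the base64 bytes before printing):
+# $ python3 -c "import os ; import struct ; import time; import base64 ; key = os.urandom(16) ; header = struct.pack('<hiih',1,int(time.time()),0,len(key)) ; print(base64.b64encode(header + key).decode())"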
+#
+# To use a particular secret, you have to add 'key' to the dict below, so something like:
+# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
+
+keys:
+ - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
+ - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+  description: Installs a Ceph Client
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+# dummy container setup is only supported on x86_64.
+# When running with containerized_deployment: true, this task
+# creates a group that contains only x86_64 hosts.
+# When running with containerized_deployment: false, this task
+# adds all client hosts to the group (no filtering).
+- name: create filtered clients group
+ group_by:
+ key: _filtered_clients
+ parents: "{{ client_group_name }}"
+ when: (ansible_facts['architecture'] == 'x86_64') or (not containerized_deployment | bool)
+
+- name: set_fact delegated_node
+ set_fact:
+ delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}"
+
+- name: set_fact admin_key_presence
+ set_fact:
+ admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
+
+- name: create cephx key(s)
+ ceph_key:
+ name: "{{ item.name }}"
+ caps: "{{ item.caps }}"
+ secret: "{{ item.key | default('') }}"
+ cluster: "{{ cluster }}"
+ dest: "{{ ceph_conf_key_directory }}"
+ import_key: "{{ admin_key_presence }}"
+ mode: "{{ item.mode | default(ceph_keyring_permissions) }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items:
+ - "{{ keys }}"
+ delegate_to: "{{ delegated_node }}"
+ when:
+ - cephx | bool
+ - keys | length > 0
+ - inventory_hostname == groups.get('_filtered_clients') | first
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: slurp client cephx key(s)
+ slurp:
+ src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
+ with_items: "{{ keys }}"
+ register: slurp_client_keys
+ delegate_to: "{{ delegated_node }}"
+ when:
+ - cephx | bool
+ - keys | length > 0
+ - inventory_hostname == groups.get('_filtered_clients') | first
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: pool related tasks
+ when:
+ - admin_key_presence | bool
+ - inventory_hostname == groups.get('_filtered_clients', []) | first
+ block:
+ - import_role:
+ name: ceph-facts
+ tasks_from: get_def_crush_rule_name.yml
+
+ - name: create ceph pool(s)
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_num: "{{ item.pg_num | default(omit) }}"
+ pgp_num: "{{ item.pgp_num | default(omit) }}"
+ size: "{{ item.size | default(omit) }}"
+ min_size: "{{ item.min_size | default(omit) }}"
+ pool_type: "{{ item.type | default('replicated') }}"
+ rule_name: "{{ item.rule_name | default(omit) }}"
+ erasure_profile: "{{ item.erasure_profile | default(omit) }}"
+ pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
+ target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
+ application: "{{ item.application | default(omit) }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ pools }}"
+ changed_when: false
+ delegate_to: "{{ delegated_node }}"
+
+- name: get client cephx keys
+ copy:
+ dest: "{{ item.source }}"
+ content: "{{ item.content | b64decode }}"
+ mode: "{{ item.item.get('mode', '0600') }}"
+ owner: "{{ ceph_uid }}"
+ group: "{{ ceph_uid }}"
+ with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
+ when: not item.get('skipped', False)
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
--- /dev/null
+---
+- name: include pre_requisite.yml
+ include_tasks: pre_requisite.yml
+ when: groups.get(mon_group_name, []) | length > 0
+
+- name: include create_users_keys.yml
+ include_tasks: create_users_keys.yml
+ when:
+ - user_config | bool
+ - not rolling_update | default(False) | bool
--- /dev/null
+---
+- name: copy ceph admin keyring
+ block:
+ - name: get keys from monitors
+ ceph_key:
+ name: client.admin
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _admin_key
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
+ content: "{{ _admin_key.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when:
+ - cephx | bool
+ - copy_admin_key | bool
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-common
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQINBFX4hgkBEADLqn6O+UFp+ZuwccNldwvh5PzEwKUPlXKPLjQfXlQRig1flpCH
+E0HJ5wgGlCtYd3Ol9f9+qU24kDNzfbs5bud58BeE7zFaZ4s0JMOMuVm7p8JhsvkU
+C/Lo/7NFh25e4kgJpjvnwua7c2YrA44ggRb1QT19ueOZLK5wCQ1mR+0GdrcHRCLr
+7Sdw1d7aLxMT+5nvqfzsmbDullsWOD6RnMdcqhOxZZvpay8OeuK+yb8FVQ4sOIzB
+FiNi5cNOFFHg+8dZQoDrK3BpwNxYdGHsYIwU9u6DWWqXybBnB9jd2pve9PlzQUbO
+eHEa4Z+jPqxY829f4ldaql7ig8e6BaInTfs2wPnHJ+606g2UH86QUmrVAjVzlLCm
+nqoGymoAPGA4ObHu9X3kO8viMBId9FzooVqR8a9En7ZE0Dm9O7puzXR7A1f5sHoz
+JdYHnr32I+B8iOixhDUtxIY4GA8biGATNaPd8XR2Ca1hPuZRVuIiGG9HDqUEtXhV
+fY5qjTjaThIVKtYgEkWMT+Wet3DPPiWT3ftNOE907e6EWEBCHgsEuuZnAbku1GgD
+LBH4/a/yo9bNvGZKRaTUM/1TXhM5XgVKjd07B4cChgKypAVHvef3HKfCG2U/DkyA
+LjteHt/V807MtSlQyYaXUTGtDCrQPSlMK5TjmqUnDwy6Qdq8dtWN3DtBWQARAQAB
+tCpDZXBoLmNvbSAocmVsZWFzZSBrZXkpIDxzZWN1cml0eUBjZXBoLmNvbT6JAjgE
+EwECACIFAlX4hgkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOhKwsBG
+DzmUXdIQAI8YPcZMBWdv489q8CzxlfRIRZ3Gv/G/8CH+EOExcmkVZ89mVHngCdAP
+DOYCl8twWXC1lwJuLDBtkUOHXNuR5+Jcl5zFOUyldq1Hv8u03vjnGT7lLJkJoqpG
+l9QD8nBqRvBU7EM+CU7kP8+09b+088pULil+8x46PwgXkvOQwfVKSOr740Q4J4nm
+/nUOyTNtToYntmt2fAVWDTIuyPpAqA6jcqSOC7Xoz9cYxkVWnYMLBUySXmSS0uxl
+3p+wK0lMG0my/gb+alke5PAQjcE5dtXYzCn+8Lj0uSfCk8Gy0ZOK2oiUjaCGYN6D
+u72qDRFBnR3jaoFqi03bGBIMnglGuAPyBZiI7LJgzuT9xumjKTJW3kN4YJxMNYu1
+FzmIyFZpyvZ7930vB2UpCOiIaRdZiX4Z6ZN2frD3a/vBxBNqiNh/BO+Dex+PDfI4
+TqwF8zlcjt4XZ2teQ8nNMR/D8oiYTUW8hwR4laEmDy7ASxe0p5aijmUApWq5UTsF
++s/QbwugccU0iR5orksM5u9MZH4J/mFGKzOltfGXNLYI6D5Mtwrnyi0BsF5eY0u6
+vkdivtdqrq2DXY+ftuqLOQ7b+t1RctbcMHGPptlxFuN9ufP5TiTWSpfqDwmHCLsT
+k2vFiMwcHdLpQ1IH8ORVRgPPsiBnBOJ/kIiXG2SxPUTjjEGOVgeA
+=/Tod
+-----END PGP PUBLIC KEY BLOCK-----
--- /dev/null
+pub 4096R/FD431D51 2009-10-22
+ Key fingerprint = 567E 347A D004 4ADE 55BA 8A5F 199E 2F91 FD43 1D51
+uid Red Hat, Inc. (release key 2) <security@redhat.com>
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.5 (GNU/Linux)
+
+mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF
+0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF
+0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c
+u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh
+XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H
+5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW
+9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj
+/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1
+PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY
+HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF
+buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB
+tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0
+LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK
+CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC
+2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf
+C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5
+un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E
+0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE
+IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh
+8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL
+Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki
+JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25
+OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq
+dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==
+=zbHE
+-----END PGP PUBLIC KEY BLOCK-----
+
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+# (c) 2015, Kevin Carter <kevin.carter@rackspace.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+import datetime
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+import json
+import os
+import pwd
+import re
+import six
+import time
+import yaml
+import tempfile as tmpfilelib
+
+from ansible.plugins.action import ActionBase
+from ansible.module_utils._text import to_bytes, to_text
+from ansible import constants as C
+from ansible import errors
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from distutils.version import LooseVersion
+from ansible import __version__ as __ansible_version__
+
+__metaclass__ = type
+
+
+CONFIG_TYPES = {
+ 'ini': 'return_config_overrides_ini',
+ 'json': 'return_config_overrides_json',
+ 'yaml': 'return_config_overrides_yaml'
+}
+
+
+class IDumper(AnsibleDumper):
+ def increase_indent(self, flow=False, indentless=False):
+ return super(IDumper, self).increase_indent(flow, False)
+
+
+class MultiKeyDict(dict):
+ """Dictionary class which supports duplicate keys.
+    This class allows for an item to be added into a standard python
+    dictionary; however, if a key is created more than once, the dictionary
+    will convert the singular value to a python tuple. This tuple type
+    forces all values to be a string.
+ Example Usage:
+ >>> z = MultiKeyDict()
+ >>> z['a'] = 1
+ >>> z['b'] = ['a', 'b', 'c']
+ >>> z['c'] = {'a': 1}
+ >>> print(z)
+ ... {'a': 1, 'b': ['a', 'b', 'c'], 'c': {'a': 1}}
+ >>> z['a'] = 2
+ >>> print(z)
+ ... {'a': tuple(['1', '2']), 'c': {'a': 1}, 'b': ['a', 'b', 'c']}
+ """
+
+ def __setitem__(self, key, value):
+ if key in self:
+ if isinstance(self[key], tuple):
+ items = self[key]
+ if str(value) not in items:
+ items += tuple([str(value)])
+ super(MultiKeyDict, self).__setitem__(key, items)
+ else:
+ if str(self[key]) != str(value):
+ items = tuple([str(self[key]), str(value)])
+ super(MultiKeyDict, self).__setitem__(key, items)
+ else:
+ return dict.__setitem__(self, key, value)
+
+
+class ConfigTemplateParser(ConfigParser.RawConfigParser):
+ """ConfigParser which supports multi key value.
+ The parser will use keys with multiple variables in a set as a multiple
+ key value within a configuration file.
+ Default Configuration file:
+ [DEFAULT]
+ things =
+ url1
+ url2
+ url3
+ other = 1,2,3
+ [section1]
+ key = var1
+ key = var2
+ key = var3
+ Example Usage:
+ >>> cp = ConfigTemplateParser(dict_type=MultiKeyDict)
+ >>> cp.read('/tmp/test.ini')
+ ... ['/tmp/test.ini']
+ >>> cp.get('DEFAULT', 'things')
+ ... \nurl1\nurl2\nurl3
+ >>> cp.get('DEFAULT', 'other')
+ ... '1,2,3'
+ >>> cp.set('DEFAULT', 'key1', 'var1')
+ >>> cp.get('DEFAULT', 'key1')
+ ... 'var1'
+ >>> cp.get('section1', 'key')
+ ... {'var1', 'var2', 'var3'}
+ >>> cp.set('section1', 'key', 'var4')
+ >>> cp.get('section1', 'key')
+ ... {'var1', 'var2', 'var3', 'var4'}
+ >>> with open('/tmp/test2.ini', 'w') as f:
+ ... cp.write(f)
+ Output file:
+ [DEFAULT]
+ things =
+ url1
+ url2
+ url3
+ key1 = var1
+ other = 1,2,3
+ [section1]
+ key = var4
+ key = var1
+ key = var3
+ key = var2
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._comments = {}
+ self.ignore_none_type = bool(kwargs.pop('ignore_none_type', True))
+ self.default_section = str(kwargs.pop('default_section', 'DEFAULT'))
+ ConfigParser.RawConfigParser.__init__(self, *args, **kwargs)
+
+ def _write(self, fp, section, key, item, entry):
+ if section:
+ # If we are not ignoring a none type value, then print out
+ # the option name only if the value type is None.
+ if not self.ignore_none_type and item is None:
+ fp.write(key + '\n')
+ elif (item is not None) or (self._optcre == self.OPTCRE):
+ fp.write(entry)
+ else:
+ fp.write(entry)
+
+ def _write_check(self, fp, key, value, section=False):
+ if isinstance(value, (tuple, set)):
+ for item in value:
+ item = str(item).replace('\n', '\n\t')
+ entry = "%s = %s\n" % (key, item)
+ self._write(fp, section, key, item, entry)
+ else:
+ if isinstance(value, list):
+ _value = [str(i.replace('\n', '\n\t')) for i in value]
+ entry = '%s = %s\n' % (key, ','.join(_value))
+ else:
+ entry = '%s = %s\n' % (key, str(value).replace('\n', '\n\t'))
+ self._write(fp, section, key, value, entry)
+
+ def write(self, fp):
+ def _do_write(section_name, section, section_bool=False):
+ _write_comments(section_name)
+ fp.write("[%s]\n" % section_name)
+ for key, value in sorted(section.items()):
+ _write_comments(section_name, optname=key)
+ self._write_check(fp, key=key, value=value,
+ section=section_bool)
+ else:
+ fp.write("\n")
+
+ def _write_comments(section, optname=None):
+ comsect = self._comments.get(section, {})
+ if optname in comsect:
+ fp.write(''.join(comsect[optname]))
+
+ if self.default_section != 'DEFAULT' and self._sections.get(
+ self.default_section, False):
+ _do_write(self.default_section,
+ self._sections[self.default_section],
+ section_bool=True)
+ self._sections.pop(self.default_section)
+
+ if self._defaults:
+ _do_write('DEFAULT', self._defaults)
+
+ for section in sorted(self._sections):
+ _do_write(section, self._sections[section], section_bool=True)
+
+ def _read(self, fp, fpname):
+ comments = []
+ cursect = None
+ optname = None
+ lineno = 0
+ e = None
+ while True:
+ line = fp.readline()
+ if not line:
+ break
+ lineno += 1
+ if line.strip() == '':
+ if comments:
+ comments.append('')
+ continue
+
+ if line[0] in '#;':
+ comments.append(line)
+ continue
+
+ if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
+ continue
+ if line[0].isspace() and cursect is not None and optname:
+ value = line.strip()
+ if value:
+ try:
+ if isinstance(cursect[optname], (tuple, set)):
+ _temp_item = list(cursect[optname])
+ del cursect[optname]
+ cursect[optname] = _temp_item
+ elif isinstance(cursect[optname], six.text_type):
+ _temp_item = [cursect[optname]]
+ del cursect[optname]
+ cursect[optname] = _temp_item
+ except NameError:
+ if isinstance(cursect[optname], (bytes, str)):
+ _temp_item = [cursect[optname]]
+ del cursect[optname]
+ cursect[optname] = _temp_item
+ cursect[optname].append(value)
+ else:
+ mo = self.SECTCRE.match(line)
+ if mo:
+ sectname = mo.group('header')
+ if sectname in self._sections:
+ cursect = self._sections[sectname]
+ elif sectname == 'DEFAULT':
+ cursect = self._defaults
+ else:
+ cursect = self._dict()
+ self._sections[sectname] = cursect
+ optname = None
+
+ comsect = self._comments.setdefault(sectname, {})
+ if comments:
+ # NOTE(flaper87): Using none as the key for
+ # section level comments
+ comsect[None] = comments
+ comments = []
+ elif cursect is None:
+ raise ConfigParser.MissingSectionHeaderError(
+ fpname,
+ lineno,
+ line
+ )
+ else:
+ mo = self._optcre.match(line)
+ if mo:
+ optname, vi, optval = mo.group('option', 'vi', 'value')
+ optname = self.optionxform(optname.rstrip())
+ if optval is not None:
+ if vi in ('=', ':') and ';' in optval:
+ pos = optval.find(';')
+ if pos != -1 and optval[pos - 1].isspace():
+ optval = optval[:pos]
+ optval = optval.strip()
+ if optval == '""':
+ optval = ''
+ cursect[optname] = optval
+ if comments:
+ comsect[optname] = comments
+ comments = []
+ else:
+ if not e:
+ e = ConfigParser.ParsingError(fpname)
+ e.append(lineno, repr(line))
+ if e:
+ raise e
+ all_sections = [self._defaults]
+ all_sections.extend(self._sections.values())
+ for options in all_sections:
+ for name, val in options.items():
+ if isinstance(val, list):
+ _temp_item = '\n'.join(val)
+ del options[name]
+ options[name] = _temp_item
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = True
+
+ def return_config_overrides_ini(self,
+ config_overrides,
+ resultant,
+ list_extend=True,
+ ignore_none_type=True,
+ default_section='DEFAULT'):
+ """Returns string value from a modified config file.
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``
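+
+        Example (illustrative): an override of
+        ``{'global': {'osd pool default size': 2}}`` merged into a resultant
+        whose ``[global]`` section only defines ``fsid`` produces an INI
+        string with both ``fsid`` and ``osd pool default size = 2`` under
+        ``[global]``.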
+ """
+        # If there is an exception loading the RawConfigParser, the config
+        # obj is loaded again without the extra options. This is being done
+        # to support older python.
+ try:
+ config = ConfigTemplateParser(
+ allow_no_value=True,
+ dict_type=MultiKeyDict,
+ ignore_none_type=ignore_none_type,
+ default_section=default_section
+ )
+ config.optionxform = str
+ except Exception:
+ config = ConfigTemplateParser(dict_type=MultiKeyDict)
+
+ config_object = StringIO(resultant)
+ config.readfp(config_object)
+ for section, items in config_overrides.items():
+ # If the items value is not a dictionary it is assumed that the
+ # value is a default item for this config type.
+ if not isinstance(items, dict):
+ if isinstance(items, list):
+ items = ','.join(to_text(i) for i in items)
+ self._option_write(
+ config,
+ 'DEFAULT',
+ section,
+ items
+ )
+ else:
+ # Attempt to add a section to the config file passing if
+ # an error is raised that is related to the section
+ # already existing.
+ try:
+ config.add_section(section)
+ except (ConfigParser.DuplicateSectionError, ValueError):
+ pass
+ for key, value in items.items():
+ try:
+ self._option_write(config, section, key, value)
+ except ConfigParser.NoSectionError as exp:
+ error_msg = str(exp)
+ error_msg += (
+                            ' Try being more explicit with your override '
+                            'data. Sections are case sensitive.'
+ )
+ raise errors.AnsibleModuleError(error_msg)
+ else:
+ config_object.close()
+
+ resultant_stringio = StringIO()
+ try:
+ config.write(resultant_stringio)
+ return resultant_stringio.getvalue()
+ finally:
+ resultant_stringio.close()
+
+ @staticmethod
+ def _option_write(config, section, key, value):
+ config.remove_option(str(section), str(key))
+ try:
+ if not any(list(value.values())):
+ value = tuple(value.keys())
+ except AttributeError:
+ pass
+        if isinstance(value, (tuple, set)):
+            config.set(str(section), str(key), value)
+        elif isinstance(value, list):
+ config.set(str(section), str(key), ','.join(str(i) for i in value))
+ else:
+ config.set(str(section), str(key), str(value))
+
+ def return_config_overrides_json(self,
+ config_overrides,
+ resultant,
+ list_extend=True,
+ ignore_none_type=True,
+ default_section='DEFAULT'):
+ """Returns config json
+
+        It's important to note that file ordering will not be preserved as the
+ information within the json file will be sorted by keys.
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``
+ """
+ original_resultant = json.loads(resultant)
+ merged_resultant = self._merge_dict(
+ base_items=original_resultant,
+ new_items=config_overrides,
+            list_extend=list_extend
+        )
+ return json.dumps(
+ merged_resultant,
+ indent=4,
+ sort_keys=True
+ )
+
+ def return_config_overrides_yaml(self,
+ config_overrides,
+ resultant,
+ list_extend=True,
+ ignore_none_type=True,
+ default_section='DEFAULT'):
+ """Return config yaml.
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``
+ """
+ original_resultant = yaml.safe_load(resultant)
+ merged_resultant = self._merge_dict(
+ base_items=original_resultant,
+ new_items=config_overrides,
+ list_extend=list_extend
+ )
+ return yaml.dump(
+ merged_resultant,
+ Dumper=IDumper,
+ default_flow_style=False,
+ width=1000,
+ )
+
+ def _merge_dict(self, base_items, new_items, list_extend=True):
+ """Recursively merge new_items into base_items.
+
+ :param base_items: ``dict``
+ :param new_items: ``dict``
+ :returns: ``dict``
+ """
+ for key, value in new_items.items():
+ if isinstance(value, dict):
+ base_items[key] = self._merge_dict(
+ base_items=base_items.get(key, {}),
+ new_items=value,
+ list_extend=list_extend
+ )
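+            # Strings containing commas or newlines are split into lists of
+            # stripped, non-empty items.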
+ elif (not isinstance(value, int) and
+ (',' in value or '\n' in value)):
+ base_items[key] = re.split(',|\n', value)
+ base_items[key] = [i.strip() for i in base_items[key] if i]
+ elif isinstance(value, list):
+ if isinstance(base_items.get(key), list) and list_extend:
+ base_items[key].extend(value)
+ else:
+ base_items[key] = value
+ elif isinstance(value, (tuple, set)):
+ if isinstance(base_items.get(key), tuple) and list_extend:
+ base_items[key] += tuple(value)
+ elif isinstance(base_items.get(key), list) and list_extend:
+ base_items[key].extend(list(value))
+ else:
+ base_items[key] = value
+ else:
+ base_items[key] = new_items[key]
+ return base_items
+
+ def _load_options_and_status(self, task_vars):
+ """Return options and status from module load."""
+
+ config_type = self._task.args.get('config_type')
+ if config_type not in ['ini', 'yaml', 'json']:
+ return False, dict(
+ failed=True,
+ msg="No valid [ config_type ] was provided. Valid options are"
+ " ini, yaml, or json."
+ )
+
+ # Access to protected method is unavoidable in Ansible
+ searchpath = [self._loader._basedir]
+
+ if self._task._role:
+ file_path = self._task._role._role_path
+ searchpath.insert(1, C.DEFAULT_ROLES_PATH)
+ searchpath.insert(1, self._task._role._role_path)
+ else:
+ file_path = self._loader.get_basedir()
+
+ user_source = self._task.args.get('src')
+        # (alextricity25) The user may pass in a data type other than a
+        # string. In that case we don't want the Python representation of the
+        # data type to be written to the file, but rather its serialized
+        # version.
+ _user_content = self._task.args.get('content')
+
+        # If the content input is a dictionary, it is dumped as JSON when
+        # config_type is 'json'.
+ if isinstance(_user_content, dict):
+ if self._task.args.get('config_type') == 'json':
+ _user_content = json.dumps(_user_content)
+
+ user_content = str(_user_content)
+ if not user_source:
+ if not user_content:
+ return False, dict(
+ failed=True,
+ msg="No user [ src ] or [ content ] was provided"
+ )
+ else:
+ tmp_content = None
+ fd, tmp_content = tmpfilelib.mkstemp()
+ try:
+ with open(tmp_content, 'wb') as f:
+ f.write(user_content.encode())
+ except Exception as err:
+ os.remove(tmp_content)
+ raise Exception(err)
+ self._task.args['src'] = source = tmp_content
+ else:
+ source = self._loader.path_dwim_relative(
+ file_path,
+ 'templates',
+ user_source
+ )
+ searchpath.insert(1, os.path.dirname(source))
+
+ _dest = self._task.args.get('dest')
+ list_extend = self._task.args.get('list_extend')
+ if not _dest:
+ return False, dict(
+ failed=True,
+ msg="No [ dest ] was provided"
+ )
+ else:
+ # Expand any user home dir specification
+ user_dest = self._remote_expand_user(_dest)
+ if user_dest.endswith(os.sep):
+ user_dest = os.path.join(user_dest, os.path.basename(source))
+
+ # Get ignore_none_type
+        # In some situations (e.g. my.cnf files), INI files can have valueless
+        # options that don't have a '=' or ':' suffix. In these cases,
+        # ConfigParser gives these options a "None" value. If ignore_none_type
+        # is set to true, these key/value options will be ignored; if it's set
+        # to false, then ConfigTemplateParser will write out only the option
+        # name without the '=' or ':' suffix. The default is true.
+ ignore_none_type = self._task.args.get('ignore_none_type', True)
+
+ default_section = self._task.args.get('default_section', 'DEFAULT')
+
+ return True, dict(
+ source=source,
+ dest=user_dest,
+ config_overrides=self._task.args.get('config_overrides', dict()),
+ config_type=config_type,
+ searchpath=searchpath,
+ list_extend=list_extend,
+ ignore_none_type=ignore_none_type,
+ default_section=default_section
+ )
+
+ def run(self, tmp=None, task_vars=None):
+ """Run the method"""
+
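+        # _make_tmp_path() does not accept a remote_user argument on every
+        # Ansible release; fall back to calling it without arguments when a
+        # TypeError is raised.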
+ try:
+ remote_user = task_vars.get('ansible_user')
+ if not remote_user:
+ remote_user = task_vars.get('ansible_ssh_user')
+ if not remote_user:
+ remote_user = self._play_context.remote_user
+
+ if not tmp:
+ tmp = self._make_tmp_path(remote_user)
+ except TypeError:
+ if not tmp:
+ tmp = self._make_tmp_path()
+
+ _status, _vars = self._load_options_and_status(task_vars=task_vars)
+ if not _status:
+ return _vars
+
+ temp_vars = task_vars.copy()
+ template_host = temp_vars['template_host'] = os.uname()[1]
+ source = temp_vars['template_path'] = _vars['source']
+ temp_vars['template_mtime'] = datetime.datetime.fromtimestamp(
+ os.path.getmtime(source)
+ )
+
+ try:
+ template_uid = temp_vars['template_uid'] = pwd.getpwuid(
+ os.stat(source).st_uid
+ ).pw_name
+ except Exception:
+ template_uid = temp_vars['template_uid'] = os.stat(source).st_uid
+
+ managed_default = C.DEFAULT_MANAGED_STR
+ managed_str = managed_default.format(
+ host=template_host,
+ uid=template_uid,
+ file=to_bytes(source)
+ )
+
+ temp_vars['ansible_managed'] = time.strftime(
+ managed_str,
+ time.localtime(os.path.getmtime(source))
+ )
+ temp_vars['template_fullpath'] = os.path.abspath(source)
+ temp_vars['template_run_date'] = datetime.datetime.now()
+
+ with open(source, 'r') as f:
+ template_data = to_text(f.read())
+
+ self._templar.environment.loader.searchpath = _vars['searchpath']
+ self._templar.set_available_variables(temp_vars)
+ resultant = self._templar.template(
+ template_data,
+ preserve_trailing_newlines=True,
+ escape_backslashes=False,
+ convert_data=False
+ )
+
+ # Access to protected method is unavoidable in Ansible
+ self._templar.set_available_variables(
+ self._templar._available_variables
+ )
+
+ if _vars['config_overrides']:
+ type_merger = getattr(self, CONFIG_TYPES.get(_vars['config_type']))
+ resultant = type_merger(
+ config_overrides=_vars['config_overrides'],
+ resultant=resultant,
+ list_extend=_vars.get('list_extend', True),
+ ignore_none_type=_vars.get('ignore_none_type', True),
+ default_section=_vars.get('default_section', 'DEFAULT')
+ )
+
+ # Re-template the resultant object as it may have new data within it
+ # as provided by an override variable.
+ resultant = self._templar.template(
+ resultant,
+ preserve_trailing_newlines=True,
+ escape_backslashes=False,
+ convert_data=False
+ )
+
+ # run the copy module
+ new_module_args = self._task.args.copy()
+ # Access to protected method is unavoidable in Ansible
+ transferred_data = self._transfer_data(
+ self._connection._shell.join_path(tmp, 'source'),
+ resultant
+ )
+ if LooseVersion(__ansible_version__) < LooseVersion("2.6"):
+ new_module_args.update(
+ dict(
+ src=transferred_data,
+ dest=_vars['dest'],
+ original_basename=os.path.basename(source),
+ follow=True,
+ ),
+ )
+ else:
+ new_module_args.update(
+ dict(
+ src=transferred_data,
+ dest=_vars['dest'],
+ _original_basename=os.path.basename(source),
+ follow=True,
+ ),
+ )
+
+ # Remove data types that are not available to the copy module
+ new_module_args.pop('config_overrides', None)
+ new_module_args.pop('config_type', None)
+ new_module_args.pop('list_extend', None)
+ new_module_args.pop('ignore_none_type', None)
+ new_module_args.pop('default_section', None)
+ # Content from config_template is converted to src
+ new_module_args.pop('content', None)
+
+ # Run the copy module
+ rc = self._execute_module(
+ module_name='copy',
+ module_args=new_module_args,
+ task_vars=task_vars
+ )
+ if self._task.args.get('content'):
+ os.remove(_vars['source'])
+ return rc
--- /dev/null
+---
+- name: configure cluster name
+ lineinfile:
+ dest: /etc/sysconfig/ceph
+ insertafter: EOF
+ create: yes
+ line: "CLUSTER={{ cluster }}"
+ regexp: "^CLUSTER="
+ when: ansible_facts['os_family'] in ["RedHat", "Suse"]
+
+# NOTE(leseb): we are performing the following check
+# to ensure any Jewel installation will not fail.
+# The following commit https://github.com/ceph/ceph/commit/791eba81a5467dd5de4f1680ed0deb647eb3fb8b
+# fixed a package issue where the path was wrong.
+# The fix is not yet in every distro package, so we are working around it.
+# Impacted versions:
+# - Jewel from UCA: https://bugs.launchpad.net/ubuntu/+source/ceph/+bug/1582773
+# - Jewel from latest Canonical 16.04 distro
+# - All previous versions from Canonical
+# - Infernalis from ceph.com
+- name: debian based systems - configure cluster name
+ when: ansible_facts['os_family'] == "Debian"
+ block:
+    - name: check whether /etc/default/ceph exists
+ stat:
+ path: /etc/default/ceph
+ register: etc_default_ceph
+ check_mode: no
+
+ - name: configure cluster name
+ when: etc_default_ceph.stat.exists
+ block:
+ - name: when /etc/default/ceph is not dir
+ lineinfile:
+ dest: /etc/default/ceph
+ insertafter: EOF
+ create: yes
+ regexp: "^CLUSTER="
+ line: "CLUSTER={{ cluster }}"
+ when: not etc_default_ceph.stat.isdir
+
+ - name: when /etc/default/ceph is dir
+ lineinfile:
+ dest: /etc/default/ceph/ceph
+ insertafter: EOF
+ create: yes
+ regexp: "^CLUSTER="
+ line: "CLUSTER={{ cluster }}"
+ when: etc_default_ceph.stat.isdir
--- /dev/null
+---
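+# etc_default_ceph is registered in configure_cluster_name.yml; on Debian,
+# /etc/default/ceph can be either a file or a directory, hence the ternary
+# on its isdir attribute below.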
+- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian
+ lineinfile:
+ dest: "{{ etc_default_ceph.stat.isdir | ternary('/etc/default/ceph/ceph', '/etc/default/ceph') }}"
+ insertafter: EOF
+ create: yes
+ regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
+ line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
+ when:
+ - ansible_facts['os_family'] == 'Debian'
+ - etc_default_ceph.stat.exists
+ notify:
+ - restart ceph mons
+ - restart ceph mgrs
+ - restart ceph osds
+ - restart ceph mdss
+ - restart ceph rgws
+ - restart ceph rbdmirrors
+
+- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
+ lineinfile:
+ dest: "/etc/sysconfig/ceph"
+ insertafter: EOF
+ create: yes
+ regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
+ line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
+ when: ansible_facts['os_family'] == 'RedHat'
+ notify:
+ - restart ceph mons
+ - restart ceph mgrs
+ - restart ceph osds
+ - restart ceph mdss
+ - restart ceph rgws
+ - restart ceph rbdmirrors
--- /dev/null
+---
+- name: config repository for Red Hat based OS
+ when: ansible_facts['os_family'] == 'RedHat'
+ block:
+ - name: include installs/configure_redhat_repository_installation.yml
+ include_tasks: installs/configure_redhat_repository_installation.yml
+ when: ceph_origin == 'repository'
+
+ - name: include installs/configure_redhat_local_installation.yml
+ include_tasks: installs/configure_redhat_local_installation.yml
+ when: ceph_origin == 'local'
+
+- name: config repository for Debian based OS
+ when: ansible_facts['os_family'] == 'Debian'
+ block:
+ - name: include installs/configure_debian_repository_installation.yml
+ include_tasks: installs/configure_debian_repository_installation.yml
+ when: ceph_origin == 'repository'
+
+ - name: update apt cache if cache_valid_time has expired
+ apt:
+ update_cache: yes
+ cache_valid_time: 3600
+ register: result
+ until: result is succeeded
+
+- name: include installs/configure_suse_repository_installation.yml
+ include_tasks: installs/configure_suse_repository_installation.yml
+ when:
+ - ansible_facts['os_family'] == 'Suse'
+ - ceph_origin == 'repository'
--- /dev/null
+---
+- name: create rbd client directory
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ rbd_client_directory_owner }}"
+ group: "{{ rbd_client_directory_group }}"
+ mode: "{{ rbd_client_directory_mode }}"
+ with_items:
+ - "{{ rbd_client_admin_socket_path }}"
+ - "{{ rbd_client_log_path }}"
+ when: rbd_client_directories | bool
--- /dev/null
+---
+- name: include debian_community_repository.yml
+ include_tasks: debian_community_repository.yml
+ when: ceph_repository == 'community'
+
+- name: include debian_dev_repository.yml
+ include_tasks: debian_dev_repository.yml
+ when: ceph_repository == 'dev'
+
+- name: include debian_custom_repository.yml
+ include_tasks: debian_custom_repository.yml
+ when: ceph_repository == 'custom'
+
+- name: include debian_uca_repository.yml
+ include_tasks: debian_uca_repository.yml
+ when: ceph_repository == 'uca'
--- /dev/null
+---
+- name: make sure /tmp exists
+ file:
+ path: /tmp
+ state: directory
+ when: use_installer | bool
+
+- name: use mktemp to create name for rundep
+ tempfile:
+ path: /tmp
+ prefix: rundep.
+ register: rundep_location
+ when: use_installer | bool
+
+- name: copy rundep
+ copy:
+ src: "{{ ansible_dir }}/rundep"
+ dest: "{{ rundep_location.path }}"
+ when: use_installer | bool
+
+- name: install ceph dependencies
+ script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}"
+ when: use_installer | bool
+
+- name: ensure rsync is installed
+ package:
+ name: rsync
+ state: present
+ register: result
+ until: result is succeeded
+
+- name: synchronize ceph install
+ synchronize:
+ src: "{{ ceph_installation_dir }}/"
+ dest: "/"
+
+- name: create user group ceph
+ group:
+ name: 'ceph'
+
+- name: create user ceph
+ user:
+ name: 'ceph'
--- /dev/null
+---
+- name: include redhat_community_repository.yml
+ include_tasks: redhat_community_repository.yml
+ when: ceph_repository == 'community'
+
+- name: include redhat_rhcs_repository.yml
+ include_tasks: redhat_rhcs_repository.yml
+ when: ceph_repository == 'rhcs'
+
+- name: include redhat_dev_repository.yml
+ include_tasks: redhat_dev_repository.yml
+ when: ceph_repository == 'dev'
+
+- name: include redhat_custom_repository.yml
+ include_tasks: redhat_custom_repository.yml
+ when: ceph_repository == 'custom'
+
+# Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version
+- name: purge yum cache
+ command: yum clean all
+ args:
+ warn: no
+ changed_when: false
+ when: ansible_facts['pkg_mgr'] == 'yum'
--- /dev/null
+---
+- name: include suse_obs_repository.yml
+ include_tasks: suse_obs_repository.yml
+ when: ceph_repository == 'obs'
--- /dev/null
+---
+- name: install dependencies for apt modules
+ package:
+ name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
+ update_cache: yes
+ register: result
+ until: result is succeeded
+
+- name: configure debian ceph community repository stable key
+ apt_key:
+ data: "{{ lookup('file', role_path+'/files/cephstable.asc') }}"
+ state: present
+ register: result
+ until: result is succeeded
+
+- name: configure debian ceph stable community repository
+ apt_repository:
+ repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
+ state: present
+ update_cache: yes
--- /dev/null
+---
+- name: configure debian custom apt key
+ apt_key:
+ url: "{{ ceph_custom_key }}"
+ state: present
+ register: result
+ until: result is succeeded
+ when: ceph_custom_key is defined
+
+- name: configure debian custom repository
+ apt_repository:
+ repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
+ state: present
+ update_cache: yes
--- /dev/null
+---
+- name: fetch ceph debian development repository
+ uri:
+ url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo
+ return_content: yes
+ register: ceph_dev_deb_repo
+
+- name: configure ceph debian development repository
+ apt_repository:
+ repo: "{{ ceph_dev_deb_repo.content }}"
+ state: present
+ update_cache: yes
--- /dev/null
+---
+- name: add ubuntu cloud archive key package
+ package:
+ name: ubuntu-cloud-keyring
+ register: result
+ until: result is succeeded
+
+- name: add ubuntu cloud archive repository
+ apt_repository:
+ repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main"
+ state: present
+ update_cache: yes
--- /dev/null
+---
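+# default_release selects the UCA pocket when installing from the UCA
+# repository, or '<release>-backports' when installing from the distro with
+# ceph_use_distro_backports enabled; otherwise it stays empty.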
+- name: install ceph for debian
+ apt:
+ name: "{{ debian_ceph_pkgs | unique }}"
+ update_cache: no
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+ register: result
+ until: result is succeeded
--- /dev/null
+---
+- name: install red hat storage ceph packages for debian
+ apt:
+ pkg: "{{ debian_ceph_pkgs | unique }}"
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
--- /dev/null
+---
+- name: install ceph bundle
+ swupd:
+ name: storage-cluster
+ state: present
+ register: result
+ until: result is succeeded
--- /dev/null
+- name: install dependencies
+ apt:
+ name: "{{ debian_package_dependencies }}"
+ state: present
+ update_cache: yes
+ cache_valid_time: 3600
+ register: result
+ until: result is succeeded
+
+- name: include install_debian_packages.yml
+ include_tasks: install_debian_packages.yml
+ when:
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ - ceph_repository != 'rhcs'
+
+- name: include install_debian_rhcs_packages.yml
+ include_tasks: install_debian_rhcs_packages.yml
+ when:
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ - ceph_repository == 'rhcs'
--- /dev/null
+---
+- name: install redhat dependencies
+ package:
+ name: "{{ redhat_package_dependencies }}"
+ state: present
+ register: result
+ until: result is succeeded
+ when: ansible_facts['distribution'] == 'RedHat'
+
+- name: install centos dependencies
+ yum:
+ name: "{{ centos_package_dependencies }}"
+ state: present
+ register: result
+ until: result is succeeded
+ when: ansible_facts['distribution'] == 'CentOS'
+
+- name: install redhat ceph packages
+ package:
+ name: "{{ redhat_ceph_pkgs | unique }}"
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
--- /dev/null
+---
+- name: install SUSE/openSUSE dependencies
+ package:
+ name: "{{ suse_package_dependencies }}"
+ state: present
+ register: result
+ until: result is succeeded
+
+- name: install SUSE/openSUSE ceph packages
+ package:
+ name: "{{ suse_ceph_pkgs | unique }}"
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
--- /dev/null
+---
+- name: enable red hat storage tools repository
+ rhsm_repository:
+ name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
+ when:
+ - mon_group_name in group_names
+ or osd_group_name in group_names
+ or mgr_group_name in group_names
+ or rgw_group_name in group_names
+ or mds_group_name in group_names
+ or nfs_group_name in group_names
+ or iscsi_gw_group_name in group_names
+ or client_group_name in group_names
+ or rbdmirror_group_name in group_names
+ or monitoring_group_name in group_names
--- /dev/null
+---
+- name: install yum plugin priorities
+ package:
+ name: yum-plugin-priorities
+ register: result
+ until: result is succeeded
+ tags: with_pkg
+ when: ansible_facts['distribution_major_version'] | int == 7
+
+- name: configure red hat ceph community repository stable key
+ rpm_key:
+ key: "{{ ceph_stable_key }}"
+ state: present
+ register: result
+ until: result is succeeded
+
+- name: configure red hat ceph stable community repository
+ yum_repository:
+ name: ceph_stable
+ description: Ceph Stable $basearch repo
+ gpgcheck: yes
+ state: present
+ gpgkey: "{{ ceph_stable_key }}"
+ baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
+ file: ceph_stable
+ priority: 2
+ register: result
+ until: result is succeeded
+
+- name: configure red hat ceph stable noarch community repository
+ yum_repository:
+ name: ceph_stable_noarch
+ description: Ceph Stable noarch repo
+ gpgcheck: yes
+ state: present
+ gpgkey: "{{ ceph_stable_key }}"
+ baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"
+ file: ceph_stable
+ priority: 2
+ register: result
+ until: result is succeeded
--- /dev/null
+---
+- name: configure red hat custom rpm key
+ rpm_key:
+ key: "{{ ceph_custom_key }}"
+ state: present
+ register: result
+ until: result is succeeded
+ when: ceph_custom_key is defined
+
+- name: configure red hat custom repository
+ get_url:
+ url: "{{ ceph_custom_repo }}"
+ dest: /etc/yum.repos.d
+ owner: root
+ group: root
--- /dev/null
+---
+- name: get latest available build
+ uri:
+ url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_facts['distribution_major_version'] }}/{{ ansible_facts['architecture'] }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
+ return_content: yes
+ run_once: true
+ register: latest_build
+
+- name: fetch ceph red hat development repository
+ uri:
+    # Use the CentOS repo since we don't currently have a dedicated Red Hat repo
+ url: "{{ (latest_build.content | from_json)[0]['chacra_url'] }}repo"
+ return_content: yes
+ register: ceph_dev_yum_repo
+
+- name: configure ceph red hat development repository
+ copy:
+ content: "{{ ceph_dev_yum_repo.content }}"
+ dest: /etc/yum.repos.d/ceph-dev.repo
+ owner: root
+ group: root
+ backup: yes
+
+- name: remove ceph_stable repositories
+ yum_repository:
+ name: '{{ item }}'
+ file: ceph_stable
+ state: absent
+ with_items:
+ - ceph_stable
+ - ceph_stable_noarch
--- /dev/null
+---
+- name: include prerequisite_rhcs_cdn_install.yml
+ include_tasks: prerequisite_rhcs_cdn_install.yml
--- /dev/null
+---
+- name: configure openSUSE ceph OBS repository
+ zypper_repository:
+ name: "OBS:filesystems:ceph:{{ ceph_release }}"
+ state: present
+ repo: "{{ ceph_obs_repo }}"
+ auto_import_keys: yes
+ autorefresh: yes
--- /dev/null
+---
+- name: include configure_repository.yml
+ include_tasks: configure_repository.yml
+ tags: package-configure
+
+- name: include installs/install_redhat_packages.yml
+ include_tasks: installs/install_redhat_packages.yml
+ when:
+ - ansible_facts['os_family'] == 'RedHat'
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ tags: package-install
+
+- name: include installs/install_suse_packages.yml
+ include_tasks: installs/install_suse_packages.yml
+ when: ansible_facts['os_family'] == 'Suse'
+ tags: package-install
+
+- name: include installs/install_on_debian.yml
+ include_tasks: installs/install_on_debian.yml
+ tags: package-install
+ when: ansible_facts['os_family'] == 'Debian'
+
+- name: include_tasks installs/install_on_clear.yml
+ include_tasks: installs/install_on_clear.yml
+ when: ansible_facts['os_family'] == 'ClearLinux'
+ tags: package-install
+
+- name: get ceph version
+ command: ceph --version
+ changed_when: false
+ check_mode: no
+ register: ceph_version
+
+- name: set_fact ceph_version
+ set_fact:
+ ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
+
+# derive ceph_release from the installed version for dev, rhcs and distro installations, since ceph_stable_release is not mandatory there
+- name: include release-rhcs.yml
+ include_tasks: release-rhcs.yml
+ when: ceph_repository in ['rhcs', 'dev']
+ or
+ ceph_origin == 'distro'
+ tags: always
+
+- name: set_fact ceph_release - override ceph_release with ceph_stable_release
+ set_fact:
+ ceph_release: "{{ ceph_stable_release }}"
+ when:
+ - ceph_origin == 'repository'
+ - ceph_repository not in ['dev', 'rhcs', 'custom']
+ tags: always
+
+- name: include create_rbd_client_dir.yml
+ include_tasks: create_rbd_client_dir.yml
+
+- name: include configure_cluster_name.yml
+ include_tasks: configure_cluster_name.yml
+
+- name: include configure_memory_allocator.yml
+ include_tasks: configure_memory_allocator.yml
+ when:
+ - (ceph_tcmalloc_max_total_thread_cache | int) > 0
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+
+- name: include selinux.yml
+ include_tasks: selinux.yml
+ when:
+ - ansible_facts['os_family'] == 'RedHat'
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
\ No newline at end of file
--- /dev/null
+---
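+# Map the major version reported by 'ceph --version' to its release codename.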
+- name: set_fact ceph_release jewel
+ set_fact:
+ ceph_release: jewel
+ when: ceph_version.split('.')[0] is version('10', '==')
+
+- name: set_fact ceph_release kraken
+ set_fact:
+ ceph_release: kraken
+ when: ceph_version.split('.')[0] is version('11', '==')
+
+- name: set_fact ceph_release luminous
+ set_fact:
+ ceph_release: luminous
+ when: ceph_version.split('.')[0] is version('12', '==')
+
+- name: set_fact ceph_release mimic
+ set_fact:
+ ceph_release: mimic
+ when: ceph_version.split('.')[0] is version('13', '==')
+
+- name: set_fact ceph_release nautilus
+ set_fact:
+ ceph_release: nautilus
+ when: ceph_version.split('.')[0] is version('14', '==')
+
+- name: set_fact ceph_release octopus
+ set_fact:
+ ceph_release: octopus
+ when: ceph_version.split('.')[0] is version('15', '==')
+
+- name: set_fact ceph_release pacific
+ set_fact:
+ ceph_release: pacific
+ when: ceph_version.split('.')[0] is version('16', '==')
--- /dev/null
+---
+- name: if selinux is not disabled
+ when: ansible_facts['selinux']['status'] == 'enabled'
+ block:
+ - name: install policycoreutils-python
+ package:
+ name: policycoreutils-python
+ state: present
+ register: result
+ until: result is succeeded
+ when: ansible_facts['distribution_major_version'] == '7'
+
+ - name: install python3-policycoreutils on RHEL 8
+ package:
+ name: python3-policycoreutils
+ state: present
+ register: result
+ until: result is succeeded
+ when:
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
+ - ansible_facts['distribution_major_version'] == '8'
--- /dev/null
+---
+# ceph-common is always installed; if a package isn't to be installed, we replace
+# it with 'ceph-common' and run the install with the | unique filter.
+debian_ceph_pkgs:
+ - "{{ (ceph_repository != 'rhcs') | ternary('ceph', 'ceph-common') }}"
+ - "ceph-common"
+ - "{{ ((ceph_repository == 'rhcs') and (mon_group_name in group_names)) | ternary('ceph-mon', 'ceph-common') }}"
+ - "{{ ((ceph_repository == 'rhcs') and (osd_group_name in group_names)) | ternary('ceph-osd', 'ceph-common') }}"
+ - "{{ (ceph_test | bool) | ternary('ceph-test', 'ceph-common') }}"
+ - "{{ (rgw_group_name in group_names) | ternary('radosgw', 'ceph-common') }}"
+ - "{{ ((ceph_repository == 'rhcs') and (client_group_name in group_names)) | ternary('ceph-fuse', 'ceph-common') }}"
+ - "{{ (rbdmirror_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}"
+
+redhat_ceph_pkgs:
+ - "{{ (ceph_test | bool) | ternary('ceph-test', 'ceph-common') }}"
+ - "ceph-common"
+ - "{{ (mon_group_name in group_names) | ternary('ceph-mon', 'ceph-common') }}"
+ - "{{ (osd_group_name in group_names) | ternary('ceph-osd', 'ceph-common') }}"
+ - "{{ (client_group_name in group_names) | ternary('ceph-fuse', 'ceph-common') }}"
+ - "{{ (client_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}"
+ - "{{ (rgw_group_name in group_names) | ternary('ceph-radosgw', 'ceph-common') }}"
+ - "{{ (rbdmirror_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}"
+
+suse_ceph_pkgs:
+ - "{{ (ceph_test | bool) | ternary('ceph-test', 'ceph-common') }}"
+ - "ceph-common"
+ - "{{ (mon_group_name in group_names) | ternary('ceph-mon', 'ceph-common') }}"
+ - "{{ (osd_group_name in group_names) | ternary('ceph-osd', 'ceph-common') }}"
+ - "{{ (client_group_name in group_names) | ternary('ceph-fuse', 'ceph-common') }}"
+ - "{{ (client_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}"
+ - "{{ (rgw_group_name in group_names) | ternary('ceph-radosgw', 'ceph-common') }}"
+ - "{{ (rbdmirror_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}"
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Guillaume Abrioux]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-config
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Guillaume Abrioux
+ description: Handles ceph-ansible initial configuration
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create ceph initial directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ ceph_uid }}"
+ group: "{{ ceph_uid }}"
+ mode: 0755
+ loop:
+ - /etc/ceph
+ - /var/lib/ceph/
+ - /var/lib/ceph/mon
+ - /var/lib/ceph/osd
+ - /var/lib/ceph/mds
+ - /var/lib/ceph/tmp
+ - /var/lib/ceph/radosgw
+ - /var/lib/ceph/bootstrap-rgw
+ - /var/lib/ceph/bootstrap-mgr
+ - /var/lib/ceph/bootstrap-mds
+ - /var/lib/ceph/bootstrap-osd
+ - /var/lib/ceph/bootstrap-rbd
+ - /var/lib/ceph/bootstrap-rbd-mirror
+ - /var/run/ceph
+ - /var/log/ceph
--- /dev/null
+---
+- name: include create_ceph_initial_dirs.yml
+ include_tasks: create_ceph_initial_dirs.yml
+ when: containerized_deployment | bool
+
+- name: include_tasks rgw_systemd_environment_file.yml
+ include_tasks: rgw_systemd_environment_file.yml
+ when: inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: config file operations related to OSDs
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+ # the rolling_update.yml playbook sets num_osds to the number of currently
+ # running osds
+ - not rolling_update | bool
+ block:
+ - name: reset num_osds
+ set_fact:
+ num_osds: 0
+
+ - name: count number of osds for lvm scenario
+ set_fact:
+ num_osds: "{{ lvm_volumes | length | int }}"
+ when: lvm_volumes | default([]) | length > 0
+
+ - block:
+ - name: look up for ceph-volume rejected devices
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: "inventory"
+ register: rejected_devices
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ PYTHONIOENCODING: utf-8
+
+ - name: set_fact rejected_devices
+ set_fact:
+ _rejected_devices: "{{ _rejected_devices | default([]) + [item.path] }}"
+ with_items: "{{ rejected_devices.stdout | default('{}') | from_json }}"
+ when: "'Used by ceph-disk' in item.rejected_reasons"
+
+ - name: set_fact _devices
+ set_fact:
+ _devices: "{{ devices | difference(_rejected_devices | default([])) }}"
+
+ - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ objectstore: "{{ osd_objectstore }}"
+ batch_devices: "{{ _devices }}"
+ osds_per_device: "{{ osds_per_device | default(1) | int }}"
+ journal_size: "{{ journal_size }}"
+ block_db_size: "{{ block_db_size }}"
+ report: true
+ action: "batch"
+ register: lvm_batch_report
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ PYTHONIOENCODING: utf-8
+ when: _devices | default([]) | length > 0
+
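+        # ceph-volume changed the output format of 'lvm batch --report' from a
+        # dict (legacy) to a list (new); both formats are handled below.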
+ - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report)
+ set_fact:
+ num_osds: "{{ ((lvm_batch_report.stdout | default('{}') | from_json).osds | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
+ when:
+ - (lvm_batch_report.stdout | default('{}') | from_json) is mapping
+ - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
+
+ - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report)
+ set_fact:
+ num_osds: "{{ ((lvm_batch_report.stdout | default('{}') | from_json) | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
+ when:
+ - (lvm_batch_report.stdout | default('{}') | from_json) is not mapping
+ - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
+ when:
+ - devices | default([]) | length > 0
+
+ - name: run 'ceph-volume lvm list' to see how many osds have already been created
+ ceph_volume:
+ action: "list"
+ register: lvm_list
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ PYTHONIOENCODING: utf-8
+ changed_when: false
+ when:
+ - devices | default([]) | length > 0
+
+ - name: set_fact num_osds (add existing osds)
+ set_fact:
+ num_osds: "{{ lvm_list.stdout | default('{}') | from_json | length | int + num_osds | default(0) | int }}"
+ when:
+ - devices | default([]) | length > 0
+
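+    # Use the 'osd memory target' from ceph_conf_overrides when provided;
+    # otherwise derive a value from total memory, safety_factor and the number
+    # of OSDs, but only when it is larger than the default osd_memory_target.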
+ - name: set_fact _osd_memory_target
+ set_fact:
+ _osd_memory_target: "{{ item }}"
+ loop:
+ - "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}"
+ - "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
+ when:
+ - item
+ - item > osd_memory_target
+
+ - name: set_fact _osd_memory_target
+ set_fact:
+ _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
+ when:
+ - _osd_memory_target is undefined
+ - num_osds | default(0) | int > 0
+ - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > osd_memory_target
+
+- name: create ceph conf directory
+ file:
+ path: "/etc/ceph"
+ state: directory
+ owner: "ceph"
+ group: "ceph"
+ mode: "{{ ceph_directories_mode }}"
+ when: not containerized_deployment | bool
+
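+# ceph_conf_overrides is merged on top of the rendered template by the
+# config_template action plugin; a hypothetical example:
+#   ceph_conf_overrides:
+#     global:
+#       osd_pool_default_size: 3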
+- name: "generate {{ cluster }}.conf configuration file"
+ action: config_template
+ args:
+ src: "ceph.conf.j2"
+ dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0644"
+ config_overrides: "{{ ceph_conf_overrides }}"
+ config_type: ini
+ notify:
+ - restart ceph mons
+ - restart ceph osds
+ - restart ceph mdss
+ - restart ceph rgws
+ - restart ceph mgrs
+ - restart ceph rbdmirrors
+ - restart ceph rbd-target-api-gw
--- /dev/null
+---
+- name: create rados gateway instance directories
+ file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_directories_mode | default('0755') }}"
+ with_items: "{{ rgw_instances }}"
+
+- name: generate environment file
+ copy:
+ dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ content: |
+ INST_NAME={{ item.instance_name }}
+ with_items: "{{ rgw_instances }}"
+ when:
+ - containerized_deployment | bool
+ - rgw_instances is defined
\ No newline at end of file
--- /dev/null
+#jinja2: trim_blocks: "true", lstrip_blocks: "true"
+# {{ ansible_managed }}
+
+[global]
+{% if not cephx | bool %}
+auth cluster required = none
+auth service required = none
+auth client required = none
+{% endif %}
+{% if ip_version == 'ipv6' %}
+ms bind ipv6 = true
+ms bind ipv4 = false
+{% endif %}
+{% if common_single_host_mode is defined and common_single_host_mode %}
+osd crush chooseleaf type = 0
+{% endif %}
+{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
+
+{% set nb_mon = groups.get(mon_group_name, []) | length | int %}
+{% set nb_client = groups.get(client_group_name, []) | length | int %}
+{% set nb_osd = groups.get(osd_group_name, []) | length | int %}
+{% if inventory_hostname in groups.get(client_group_name, []) and not inventory_hostname == groups.get(client_group_name, []) | first %}
+{% endif %}
+
+{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
+mon initial members = {% for host in groups[mon_group_name] %}
+ {% if hostvars[host]['ansible_facts']['hostname'] is defined -%}
+ {{ hostvars[host]['ansible_facts']['hostname'] }}
+ {%- endif %}
+ {%- if not loop.last %},{% endif %}
+ {% endfor %}
+
+osd pool default crush rule = {{ osd_pool_default_crush_rule }}
+{% endif %}
+
+fsid = {{ fsid }}
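+{# Build the "mon host" value: one entry per monitor with its v2 address, plus the v1 address when mon_host_v1.enabled is true; fall back to external_cluster_mon_ips when there are no local monitors #}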
+mon host = {% if nb_mon > 0 %}
+{% for host in _monitor_addresses -%}
+{% if mon_host_v1.enabled | bool %}
+{% set _v1 = ',v1:' + host.addr + mon_host_v1.suffix %}
+{% endif %}
+[{{ "v2:" + host.addr + mon_host_v2.suffix }}{{ _v1 | default('') }}]
+{%- if not loop.last -%},{%- endif %}
+{%- endfor %}
+{% elif nb_mon == 0 %}
+{{ external_cluster_mon_ips }}
+{% endif %}
+
+{% if public_network is defined %}
+public network = {{ public_network | regex_replace(' ', '') }}
+{% endif %}
+{% if cluster_network is defined %}
+cluster network = {{ cluster_network | regex_replace(' ', '') }}
+{% endif %}
+{% if rgw_override_bucket_index_max_shards is defined %}
+rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
+{% endif %}
+{% if rgw_bucket_default_quota_max_objects is defined %}
+rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
+{% endif %}
+
+{% if inventory_hostname in groups.get(client_group_name, []) %}
+[client.libvirt]
+admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
+log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
+{% endif %}
+
+{% if inventory_hostname in groups.get(osd_group_name, []) %}
+{% if osd_objectstore == 'filestore' %}
+[osd]
+osd mkfs type = {{ osd_mkfs_type }}
+osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
+osd mount options xfs = {{ osd_mount_options_xfs }}
+osd journal size = {{ journal_size }}
+{% if filestore_xattr_use_omap != None %}
+filestore xattr use omap = {{ filestore_xattr_use_omap }}
+{% elif osd_mkfs_type == "ext4" %}
+filestore xattr use omap = true
+{# else, default is false #}
+{% endif %}
+{% endif %}
+{% if osd_objectstore == 'bluestore' %}
+{% set _num_osds = num_osds | default(0) | int %}
+[osd]
+osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
+{% endif %}
+{% endif %}
+
+{% if inventory_hostname in groups.get(rgw_group_name, []) %}
+{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}
+{# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
+{% if hostvars[inventory_hostname]['rgw_instances'] is defined %}
+{% for instance in hostvars[inventory_hostname]['rgw_instances'] %}
+[client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
+host = {{ _rgw_hostname }}
+keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + instance['instance_name'] }}.log
+{% set _rgw_binding_socket = instance['radosgw_address'] | default(_radosgw_address) | string + ':' + instance['radosgw_frontend_port'] | default(radosgw_frontend_port) | string %}
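+{# Render the "rgw frontends" line for either civetweb or beast, appending the SSL certificate option when radosgw_frontend_ssl_certificate is set #}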
+{%- macro frontend_line(frontend_type) -%}
+{%- if frontend_type == 'civetweb' -%}
+{{ radosgw_frontend_type }} port={{ _rgw_binding_socket }}{{ 's ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
+{%- elif frontend_type == 'beast' -%}
+{{ radosgw_frontend_type }} {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
+{%- endif -%}
+{%- endmacro -%}
+rgw frontends = {{ frontend_line(radosgw_frontend_type) }} {{ radosgw_frontend_options }}
+{% if 'num_threads' not in radosgw_frontend_options %}
+rgw thread pool size = {{ radosgw_thread_pool_size }}
+{% endif %}
+{% if rgw_multisite | bool %}
+{% if ((instance['rgw_zonemaster'] | default(rgw_zonemaster) | bool) or (deploy_secondary_zones | default(True) | bool)) %}
+rgw_realm = {{ instance['rgw_realm'] }}
+rgw_zonegroup = {{ instance['rgw_zonegroup'] }}
+rgw_zone = {{ instance['rgw_zone'] }}
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endif %}
+
+{% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
+{% for host in groups[nfs_group_name] %}
+{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_facts']['hostname']) %}
+{% if nfs_obj_gw | bool %}
+[client.rgw.{{ _rgw_hostname }}]
+host = {{ _rgw_hostname }}
+keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_facts']['hostname'] }}.log
+{% endif %}
+{% endfor %}
+{% endif %}
--- /dev/null
+# Ansible role: ceph-container-common
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+[Unit]
+Description=ceph target that allows starting/stopping all ceph*@.service instances at once
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+# NOTE (leseb): we must check each inventory group so this will work with collocated daemons
+- name: inspect ceph mon container
+ command: "{{ container_binary }} inspect {{ ceph_mon_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_mon_inspect
+ when:
+ - mon_group_name in group_names
+ - ceph_mon_container_stat.get('rc') == 0
+ - ceph_mon_container_stat.get('stdout_lines', [])|length != 0
+
+- name: inspect ceph osd container
+ command: "{{ container_binary }} inspect {{ ceph_osd_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_osd_inspect
+ when:
+ - osd_group_name in group_names
+ - ceph_osd_container_stat.get('rc') == 0
+ - ceph_osd_container_stat.get('stdout_lines', [])|length != 0
+
+- name: inspect ceph mds container
+ command: "{{ container_binary }} inspect {{ ceph_mds_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_mds_inspect
+ when:
+ - mds_group_name in group_names
+ - ceph_mds_container_stat.get('rc') == 0
+ - ceph_mds_container_stat.get('stdout_lines', [])|length != 0
+
+- name: inspect ceph rgw container
+ command: "{{ container_binary }} inspect {{ ceph_rgw_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_rgw_inspect
+ when:
+ - rgw_group_name in group_names
+ - ceph_rgw_container_stat.get('rc') == 0
+ - ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
+
+- name: inspect ceph mgr container
+ command: "{{ container_binary }} inspect {{ ceph_mgr_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_mgr_inspect
+ when:
+ - mgr_group_name in group_names
+ - ceph_mgr_container_stat.get('rc') == 0
+ - ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
+
+- name: inspect ceph rbd mirror container
+ command: "{{ container_binary }} inspect {{ ceph_rbd_mirror_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_rbd_mirror_inspect
+ when:
+ - rbdmirror_group_name in group_names
+ - ceph_rbd_mirror_container_stat.get('rc') == 0
+ - ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
+
+- name: inspect ceph nfs container
+ command: "{{ container_binary }} inspect {{ ceph_nfs_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_nfs_inspect
+ when:
+ - nfs_group_name in group_names
+ - ceph_nfs_container_stat.get('rc') == 0
+ - ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
+
+- name: inspect ceph crash container
+ command: "{{ container_binary }} inspect {{ ceph_crash_container_stat.stdout }}"
+ changed_when: false
+ register: ceph_crash_inspect
+ when:
+ - ceph_crash_container_stat.get('rc') == 0
+ - ceph_crash_container_stat.get('stdout_lines', [])|length != 0
+
+# NOTE(leseb): using failed_when to handle the case when the image is not present yet
+- name: "inspecting ceph mon container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_mon_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_mon_container_inspect_before_pull
+ when:
+ - mon_group_name in group_names
+ - ceph_mon_inspect.get('rc') == 0
+
+- name: "inspecting ceph osd container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_osd_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_osd_container_inspect_before_pull
+ when:
+ - osd_group_name in group_names
+ - ceph_osd_inspect.get('rc') == 0
+
+- name: "inspecting ceph rgw container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_rgw_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_rgw_container_inspect_before_pull
+ when:
+ - rgw_group_name in group_names
+ - ceph_rgw_inspect.get('rc') == 0
+
+- name: "inspecting ceph mds container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_mds_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_mds_container_inspect_before_pull
+ when:
+ - mds_group_name in group_names
+ - ceph_mds_inspect.get('rc') == 0
+
+- name: "inspecting ceph mgr container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_mgr_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_mgr_container_inspect_before_pull
+ when:
+ - mgr_group_name in group_names
+ - ceph_mgr_inspect.get('rc') == 0
+
+- name: "inspecting ceph rbd mirror container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_rbd_mirror_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_rbd_mirror_container_inspect_before_pull
+ when:
+ - rbdmirror_group_name in group_names
+ - ceph_rbd_mirror_inspect.get('rc') == 0
+
+- name: "inspecting ceph nfs container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_nfs_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_nfs_container_inspect_before_pull
+ when:
+ - nfs_group_name in group_names
+ - ceph_nfs_inspect.get('rc') == 0
+
+- name: "inspecting ceph crash container image before pulling"
+ command: "{{ container_binary }} inspect {{ (ceph_crash_inspect.stdout | from_json)[0].Image }}"
+ changed_when: false
+ failed_when: false
+ register: ceph_crash_container_inspect_before_pull
+ when: ceph_crash_inspect.get('rc') == 0
+
+- name: set_fact ceph_mon_image_repodigest_before_pulling
+ set_fact:
+ ceph_mon_image_repodigest_before_pulling: "{{ (ceph_mon_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when:
+ - mon_group_name in group_names
+ - ceph_mon_container_inspect_before_pull.get('rc') == 0
+
+- name: set_fact ceph_osd_image_repodigest_before_pulling
+ set_fact:
+ ceph_osd_image_repodigest_before_pulling: "{{ (ceph_osd_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when:
+ - osd_group_name in group_names
+ - ceph_osd_container_inspect_before_pull.get('rc') == 0
+
+- name: set_fact ceph_mds_image_repodigest_before_pulling
+ set_fact:
+ ceph_mds_image_repodigest_before_pulling: "{{ (ceph_mds_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when:
+ - mds_group_name in group_names
+ - ceph_mds_container_inspect_before_pull.get('rc') == 0
+
+- name: set_fact ceph_rgw_image_repodigest_before_pulling
+ set_fact:
+ ceph_rgw_image_repodigest_before_pulling: "{{ (ceph_rgw_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when:
+ - rgw_group_name in group_names
+ - ceph_rgw_container_inspect_before_pull.get('rc') == 0
+
+- name: set_fact ceph_mgr_image_repodigest_before_pulling
+ set_fact:
+ ceph_mgr_image_repodigest_before_pulling: "{{ (ceph_mgr_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when:
+ - mgr_group_name in group_names
+ - ceph_mgr_container_inspect_before_pull.get('rc') == 0
+
+- name: set_fact ceph_crash_image_repodigest_before_pulling
+ set_fact:
+ ceph_crash_image_repodigest_before_pulling: "{{ (ceph_crash_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when: ceph_crash_container_inspect_before_pull.get('rc') == 0
+
+- name: set_fact ceph_rbd_mirror_image_repodigest_before_pulling
+ set_fact:
+ ceph_rbd_mirror_image_repodigest_before_pulling: "{{ (ceph_rbd_mirror_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when:
+ - rbdmirror_group_name in group_names
+ - ceph_rbd_mirror_container_inspect_before_pull.get('rc') == 0
+
+- name: set_fact ceph_nfs_image_repodigest_before_pulling
+ set_fact:
+ ceph_nfs_image_repodigest_before_pulling: "{{ (ceph_nfs_container_inspect_before_pull.stdout | from_json)[0].Id }}"
+ when:
+ - nfs_group_name in group_names
+ - ceph_nfs_container_inspect_before_pull.get('rc') == 0
+
+- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
+ command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ register: docker_image
+ until: docker_image.rc == 0
+ retries: "{{ docker_pull_retry }}"
+ delay: 10
+ when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image | bool)
+ environment:
+ HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
+ HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
+ NO_PROXY: "{{ ceph_docker_no_proxy }}"
+
+- name: "pulling alertmanager/prometheus/grafana container images"
+ command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}"
+ changed_when: false
+ register: monitoring_images
+ until: monitoring_images.rc == 0
+ retries: "{{ docker_pull_retry }}"
+ delay: 10
+ loop:
+ - "{{ alertmanager_container_image }}"
+ - "{{ prometheus_container_image }}"
+ - "{{ grafana_container_image }}"
+ when:
+ - dashboard_enabled | bool
+ - inventory_hostname in groups.get(monitoring_group_name, [])
+ environment:
+ HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
+ HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
+ NO_PROXY: "{{ ceph_docker_no_proxy }}"
+
+- name: "pulling node-exporter container image"
+ command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}"
+ changed_when: false
+ register: node_exporter_image
+ until: node_exporter_image.rc == 0
+ retries: "{{ docker_pull_retry }}"
+ delay: 10
+ when:
+ - dashboard_enabled | bool
+ - inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(nfs_group_name, []) or
+ inventory_hostname in groups.get(iscsi_gw_group_name, []) or
+ inventory_hostname in groups.get(monitoring_group_name, [])
+ environment:
+ HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
+ HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
+ NO_PROXY: "{{ ceph_docker_no_proxy }}"
+
+- name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
+ command: "{{ container_binary }} inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ failed_when: false
+ register: image_inspect_after_pull
+
+- name: set_fact image_repodigest_after_pulling
+ set_fact:
+ image_repodigest_after_pulling: "{{ (image_inspect_after_pull.stdout | from_json)[0].Id }}"
+ when: image_inspect_after_pull.rc == 0
+
+- name: set_fact ceph_mon_image_updated
+ set_fact:
+ ceph_mon_image_updated: "{{ ceph_mon_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph mons
+ when:
+ - mon_group_name in group_names
+ - ceph_mon_container_inspect_before_pull.get('rc') == 0
+ - ceph_mon_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: set_fact ceph_osd_image_updated
+ set_fact:
+ ceph_osd_image_updated: "{{ ceph_osd_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph osds
+ when:
+ - osd_group_name in group_names
+ - ceph_osd_container_inspect_before_pull.get('rc') == 0
+ - ceph_osd_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: set_fact ceph_mds_image_updated
+ set_fact:
+ ceph_mds_image_updated: "{{ ceph_mds_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph mdss
+ when:
+ - mds_group_name in group_names
+ - ceph_mds_container_inspect_before_pull.get('rc') == 0
+ - ceph_mds_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: set_fact ceph_rgw_image_updated
+ set_fact:
+ ceph_rgw_image_updated: "{{ ceph_rgw_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph rgws
+ when:
+ - rgw_group_name in group_names
+ - ceph_rgw_container_inspect_before_pull.get('rc') == 0
+ - ceph_rgw_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: set_fact ceph_mgr_image_updated
+ set_fact:
+ ceph_mgr_image_updated: "{{ ceph_mgr_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph mgrs
+ when:
+ - mgr_group_name in group_names
+ - ceph_mgr_container_inspect_before_pull.get('rc') == 0
+ - ceph_mgr_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: set_fact ceph_rbd_mirror_image_updated
+ set_fact:
+ ceph_rbd_mirror_image_updated: "{{ ceph_rbd_mirror_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph rbdmirrors
+ when:
+ - rbdmirror_group_name in group_names
+ - ceph_rbd_mirror_container_inspect_before_pull.get('rc') == 0
+ - ceph_rbd_mirror_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: set_fact ceph_nfs_image_updated
+ set_fact:
+ ceph_nfs_image_updated: "{{ ceph_nfs_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph nfss
+ when:
+ - nfs_group_name in group_names
+ - ceph_nfs_container_inspect_before_pull.get('rc') == 0
+ - ceph_nfs_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: set_fact ceph_crash_image_updated
+ set_fact:
+ ceph_crash_image_updated: "{{ ceph_crash_image_repodigest_before_pulling != image_repodigest_after_pulling }}"
+ changed_when: true
+ notify: restart ceph crash
+ when:
+ - ceph_crash_container_inspect_before_pull.get('rc') == 0
+ - ceph_crash_image_repodigest_before_pulling != image_repodigest_after_pulling
+
+- name: export local ceph dev image
+ command: >
+ {{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
+ delegate_to: localhost
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ run_once: true
+
+- name: copy ceph dev image file
+ copy:
+ src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
+
+- name: load ceph dev image
+ command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
+
+- name: remove tmp ceph dev image file
+ file:
+ name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ state: absent
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
+
--- /dev/null
+---
+- name: generate systemd ceph.target file
+ copy:
+ src: ceph.target
+ dest: /etc/systemd/system/ceph.target
+
+- name: enable ceph.target
+ service:
+ name: ceph.target
+ enabled: yes
+ daemon_reload: yes
+
+- name: include prerequisites.yml
+ include_tasks: prerequisites.yml
+
+- name: include registry.yml
+ include_tasks: registry.yml
+ when: ceph_docker_registry_auth | bool
+
+- name: include fetch_image.yml
+ include_tasks: fetch_image.yml
+ tags: fetch_container_image
+
+- name: get ceph version
+ command: >
+ {{ container_binary }} run --rm --net=host --entrypoint /usr/bin/ceph
+ {{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }}
+ --version
+ changed_when: false
+ check_mode: no
+ register: ceph_version
+
+- name: set_fact ceph_version ceph_version.stdout.split
+ set_fact:
+ ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
+
+- name: include release.yml
+ include_tasks: release.yml
--- /dev/null
+---
+- name: lvmetad tasks related
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - lvmetad_disabled | default(False) | bool
+ - ansible_facts['os_family'] == 'RedHat'
+ - ansible_facts['distribution_major_version'] | int == 7
+ block:
+ - name: stop lvmetad
+ service:
+ name: lvm2-lvmetad
+ state: stopped
+
+ - name: disable and mask lvmetad service
+ service:
+ name: lvm2-lvmetad
+ enabled: no
+ masked: yes
+
+- name: remove ceph udev rules
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/udev/rules.d/95-ceph-osd.rules
+ - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
+
+- name: ensure tmpfiles.d is present
+ lineinfile:
+ path: /etc/tmpfiles.d/ceph-common.conf
+ line: "d /run/ceph 0770 root root -"
+ owner: root
+ group: root
+ mode: 0644
+ state: present
+ create: yes
+
+- name: restore certificates selinux context
+ when:
+ - ansible_facts['os_family'] == 'RedHat'
+ - inventory_hostname in groups.get(mon_group_name, [])
+ or inventory_hostname in groups.get(rgw_group_name, [])
+ command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted
+ changed_when: false
--- /dev/null
+---
+- name: container registry authentication
+ command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}'
+ args:
+ stdin: '{{ ceph_docker_registry_password }}'
+ stdin_add_newline: no
+ changed_when: false
+ environment:
+ HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
+ HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
+ NO_PROXY: "{{ ceph_docker_no_proxy }}"
\ No newline at end of file
--- /dev/null
+---
+- name: set_fact ceph_release jewel
+ set_fact:
+ ceph_release: jewel
+ when: ceph_version.split('.')[0] is version('10', '==')
+
+- name: set_fact ceph_release kraken
+ set_fact:
+ ceph_release: kraken
+ when: ceph_version.split('.')[0] is version('11', '==')
+
+- name: set_fact ceph_release luminous
+ set_fact:
+ ceph_release: luminous
+ when: ceph_version.split('.')[0] is version('12', '==')
+
+- name: set_fact ceph_release mimic
+ set_fact:
+ ceph_release: mimic
+ when: ceph_version.split('.')[0] is version('13', '==')
+
+- name: set_fact ceph_release nautilus
+ set_fact:
+ ceph_release: nautilus
+ when: ceph_version.split('.')[0] is version('14', '==')
+
+- name: set_fact ceph_release octopus
+ set_fact:
+ ceph_release: octopus
+ when: ceph_version.split('.')[0] is version('15', '==')
+
+- name: set_fact ceph_release pacific
+ set_fact:
+ ceph_release: pacific
+ when: ceph_version.split('.')[0] is version('16', '==')
--- /dev/null
+# Ansible role: ceph-container-engine
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Guillaume Abrioux
+ description: Handles container installation prerequisites
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: Ubuntu
+ versions:
+ - xenial
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: include pre_requisites/prerequisites.yml
+ include_tasks: pre_requisites/prerequisites.yml
+ when: not is_atomic | bool
--- /dev/null
+---
+- name: uninstall old docker versions
+ package:
+ name: ['docker', 'docker-engine', 'docker.io', 'containerd', 'runc']
+ state: absent
+ when: container_package_name == 'docker-ce'
+
+- name: allow apt to use a repository over https (debian)
+ package:
+ name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
+ update_cache: yes
+ register: result
+ until: result is succeeded
+
+- name: add docker's gpg key
+ apt_key:
+ url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
+ register: result
+ until: result is succeeded
+ when: container_package_name == 'docker-ce'
+
+- name: add docker repository
+ apt_repository:
+ repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
+ when: container_package_name == 'docker-ce'
+
+- name: add podman ppa repository
+ apt_repository:
+ repo: "ppa:projectatomic/ppa"
+ when:
+ - container_package_name == 'podman'
+ - ansible_facts['distribution'] == 'Ubuntu'
--- /dev/null
+---
+- name: include specific variables
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
+ - "{{ ansible_facts['os_family'] }}.yml"
+ when: container_package_name is undefined and container_service_name is undefined
+
+- name: debian based systems tasks
+ include_tasks: debian_prerequisites.yml
+ when:
+ - ansible_facts['os_family'] == 'Debian'
+ tags: with_pkg
+
+- name: install container packages
+ package:
+ name: '{{ container_package_name }}'
+ update_cache: true
+ register: result
+ until: result is succeeded
+ tags: with_pkg
+
+- name: install lvm2 package
+ package:
+ name: lvm2
+ register: result
+ until: result is succeeded
+ tags: with_pkg
+ when: inventory_hostname in groups.get(osd_group_name, [])
+
+- name: extra configuration for docker
+ when: container_service_name == 'docker'
+ block:
+ - name: create the systemd docker override directory
+ file:
+ path: /etc/systemd/system/docker.service.d
+ state: directory
+ when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
+
+ - name: create the systemd docker override file
+ template:
+ src: docker-proxy.conf.j2
+ dest: /etc/systemd/system/docker.service.d/proxy.conf
+ mode: 0600
+ owner: root
+ group: root
+ register: proxy_created
+ when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
+
+ - name: remove docker proxy configuration
+ file:
+ path: /etc/systemd/system/docker.service.d/proxy.conf
+ state: absent
+ register: proxy_removed
+ when:
+ - ceph_docker_http_proxy is not defined
+ - ceph_docker_https_proxy is not defined
+
+  # using xxx.changed here instead of an ansible handler because we need the
+  # restart to take effect immediately and not wait until the end of the play.
+  # using flush_handlers via the meta action plugin isn't enough either because
+  # it flushes all handlers and not only the one notified in this role.
+ - name: restart docker
+ systemd:
+ name: "{{ container_service_name }}"
+ state: restarted
+ daemon_reload: yes
+ when: proxy_created.changed | bool or proxy_removed.changed | bool
+
+ - name: start container service
+ service:
+ name: '{{ container_service_name }}'
+ state: started
+ enabled: yes
+  tags: with_pkg
--- /dev/null
+[Service]
+{% if ceph_docker_http_proxy is defined %}
+Environment="HTTP_PROXY={{ ceph_docker_http_proxy }}"
+{% endif %}
+{% if ceph_docker_https_proxy is defined %}
+Environment="HTTPS_PROXY={{ ceph_docker_https_proxy }}"
+{% endif %}
+Environment="NO_PROXY={{ ceph_docker_no_proxy }}"
--- /dev/null
+RedHat-8.yml
\ No newline at end of file
--- /dev/null
+---
+container_package_name: docker-ce
+container_service_name: docker
--- /dev/null
+---
+container_package_name: podman
+container_service_name: podman
--- /dev/null
+---
+container_package_name: docker
+container_service_name: docker
--- /dev/null
+---
+container_package_name: docker.io
+container_service_name: docker
--- /dev/null
+---
+container_package_name: docker.io
+container_service_name: docker
--- /dev/null
+---
+container_package_name: docker.io
+container_service_name: docker
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Guillaume Abrioux
+ description: Deploy ceph-crash
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - 8
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create and copy client.crash keyring
+ when: cephx | bool
+ block:
+ - name: create client.crash keyring
+ ceph_key:
+ name: "client.crash"
+ caps:
+ mon: 'allow profile crash'
+ mgr: 'allow profile crash'
+ cluster: "{{ cluster }}"
+ dest: "{{ ceph_conf_key_directory }}"
+ import_key: True
+ mode: "{{ ceph_keyring_permissions }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
+ run_once: True
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: get keys from monitors
+ ceph_key:
+ name: client.crash
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _crash_keys
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.crash.keyring"
+ content: "{{ _crash_keys.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: start ceph-crash daemon
+ when: containerized_deployment | bool
+ block:
+ - name: create /var/lib/ceph/crash/posted
+ file:
+ path: /var/lib/ceph/crash/posted
+ state: directory
+ mode: '0755'
+ owner: "{{ ceph_uid }}"
+ group: "{{ ceph_uid }}"
+
+ - name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: start the ceph-crash service
+ systemd:
+ name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
--- /dev/null
+---
+- name: generate systemd unit file for ceph-crash container
+ template:
+ src: "{{ role_path }}/templates/ceph-crash.service.j2"
+ dest: /etc/systemd/system/ceph-crash@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph crash
\ No newline at end of file
--- /dev/null
+[Unit]
+Description=Ceph crash dump collector
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-crash-%i
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-crash-%i
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-crash-%i \
+{% if container_binary == 'podman' %}
+-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+--net=host \
+-v /var/lib/ceph:/var/lib/ceph:z \
+-v /etc/localtime:/etc/localtime:ro \
+-v /etc/ceph:/etc/ceph:z \
+--entrypoint=/usr/bin/ceph-crash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-crash-%i
+{% endif %}
+StartLimitInterval=10min
+StartLimitBurst=30
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=10
+
+[Install]
+WantedBy=ceph.target
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Boris Ranto
+ description: Configures Ceph Dashboard
+ license: Apache
+ min_ansible_version: 2.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ delegate_facts: true
+
+- name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
+ when: containerized_deployment | bool
+
+- name: set_fact container_run_cmd
+ set_fact:
+ ceph_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --net=host --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
+
+- name: get current mgr backend - ipv4
+ set_fact:
+ dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(dashboard_network.split(',')) | first }}"
+ when: ip_version == 'ipv4'
+ loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+
+- name: get current mgr backend - ipv6
+ set_fact:
+ dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(dashboard_network.split(',')) | last }}"
+ when: ip_version == 'ipv6'
+ loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+
+- include_role:
+ name: ceph-facts
+ tasks_from: set_radosgw_address.yml
+ loop: "{{ groups.get(rgw_group_name, []) }}"
+ loop_control:
+ loop_var: ceph_dashboard_call_item
+ when: groups.get(rgw_group_name, []) | length > 0
+
+- name: disable SSL for dashboard
+ when: dashboard_protocol == "http"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ block:
+ - name: get SSL status for dashboard
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl"
+ changed_when: false
+ register: current_ssl_for_dashboard
+
+ - name: disable SSL for dashboard
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false"
+ when: current_ssl_for_dashboard.stdout == "true"
+
+- name: with SSL for dashboard
+ when: dashboard_protocol == "https"
+ block:
+ - name: enable SSL for dashboard
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ - name: copy dashboard SSL certificate file
+ copy:
+ src: "{{ dashboard_crt }}"
+ dest: "/etc/ceph/ceph-dashboard.crt"
+ owner: root
+ group: root
+ mode: 0440
+ remote_src: "{{ dashboard_tls_external | bool }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: dashboard_crt | length > 0
+
+ - name: copy dashboard SSL certificate key
+ copy:
+ src: "{{ dashboard_key }}"
+ dest: "/etc/ceph/ceph-dashboard.key"
+ owner: root
+ group: root
+ mode: 0440
+ remote_src: "{{ dashboard_tls_external | bool }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: dashboard_key | length > 0
+
+ - name: generate and copy self-signed certificate
+ when: dashboard_key | length == 0 or dashboard_crt | length == 0
+ run_once: true
+ block:
+ - name: set_fact subj_alt_names
+ set_fact:
+ subj_alt_names: >
+ {% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%}
+ DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}
+ {%- endfor -%}
+
+ - name: create tempfile for openssl certificate and key generation
+ tempfile:
+ state: file
+ register: openssl_config_file
+
+ - name: copy the openssl configuration file
+ copy:
+ src: "{{ '/etc/pki/tls/openssl.cnf' if ansible_facts['os_family'] == 'RedHat' else '/etc/ssl/openssl.cnf' }}"
+ dest: '{{ openssl_config_file.path }}'
+ remote_src: true
+
+ - name: add subjectAltName to the openssl configuration
+ ini_file:
+ path: '{{ openssl_config_file.path }}'
+ section: v3_ca
+ option: subjectAltName
+ value: '{{ subj_alt_names | trim }}'
+
+ - name: generate a Self Signed OpenSSL certificate for dashboard
+ shell: |
+ test -f /etc/ceph/ceph-dashboard.key -a -f /etc/ceph/ceph-dashboard.crt || \
+ openssl req -new -nodes -x509 -subj '/O=IT/CN={{ dashboard_certificate_cn }}/' -config {{ openssl_config_file.path }} -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca
+
+ - name: remove the openssl tempfile
+ file:
+ path: '{{ openssl_config_file.path }}'
+ state: absent
+
+ - name: slurp self-signed generated certificate for dashboard
+ slurp:
+ src: "/etc/ceph/{{ item }}"
+ run_once: True
+ with_items:
+ - 'ceph-dashboard.key'
+ - 'ceph-dashboard.crt'
+ register: slurp_self_signed_crt
+
+ - name: copy self-signed generated certificate on mons
+ copy:
+ dest: "{{ item.0.source }}"
+ content: "{{ item.0.content | b64decode }}"
+ owner: "{{ ceph_uid }}"
+ group: "{{ ceph_uid }}"
+ mode: "{{ '0600' if item.0.source.split('.')[-1] == 'key' else '0664' }}"
+ delegate_to: "{{ item.1 }}"
+ with_nested:
+ - "{{ slurp_self_signed_crt.results }}"
+ - "{{ groups[mon_group_name] }}"
+
+ - name: import dashboard certificate file
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ - name: import dashboard certificate key
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+- name: "set the dashboard port ({{ dashboard_port }})"
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+- name: "set the dashboard SSL port ({{ dashboard_port }})"
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+ failed_when: false # Do not fail if the option does not exist, it only exists post-14.2.0
+
+- name: config the current dashboard backend
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ run_once: true
+ with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
+
+- name: disable mgr dashboard module (restart)
+ ceph_mgr_module:
+ name: dashboard
+ cluster: "{{ cluster }}"
+ state: disable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+- name: enable mgr dashboard module (restart)
+ ceph_mgr_module:
+ name: dashboard
+ cluster: "{{ cluster }}"
+ state: enable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+- name: create dashboard admin user
+ ceph_dashboard_user:
+ name: "{{ dashboard_admin_user }}"
+ cluster: "{{ cluster }}"
+ password: "{{ dashboard_admin_password }}"
+ roles: ["{{ 'read-only' if dashboard_admin_user_ro | bool else 'administrator' }}"]
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: disable unused dashboard features
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+ with_items: "{{ dashboard_disabled_features }}"
+
+- name: set grafana api user
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+
+- name: set grafana api password
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -"
+ args:
+ stdin: "{{ grafana_admin_password }}"
+ stdin_add_newline: no
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+
+- name: disable ssl verification for grafana
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+ when:
+ - dashboard_protocol == "https"
+ - dashboard_grafana_api_no_ssl_verify | bool
+
+- name: set alertmanager host
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+
+- name: set prometheus host
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+
+- include_tasks: configure_grafana_layouts.yml
+ with_items: '{{ grafana_server_addrs }}'
+ vars:
+ grafana_server_addr: '{{ item }}'
+
+- name: config monitoring api url vip
+ run_once: true
+ block:
+ - name: config grafana api url vip
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: dashboard_frontend_vip is defined and dashboard_frontend_vip | length > 0
+
+ - name: config alertmanager api url
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: alertmanager_frontend_vip is defined and alertmanager_frontend_vip | length > 0
+
+ - name: config prometheus api url
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: prometheus_frontend_vip is defined and prometheus_frontend_vip | length > 0
+
+- name: dashboard object gateway management frontend
+ when: groups.get(rgw_group_name, []) | length > 0
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ block:
+ - name: setup rgw credentials backward
+ block:
+ - name: set the rgw credentials
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials"
+ changed_when: false
+ register: result
+ until: result is succeeded
+ retries: 5
+ rescue:
+ - name: create radosgw system user
+ radosgw_user:
+ name: "{{ dashboard_rgw_api_user_id }}"
+ cluster: "{{ cluster }}"
+ display_name: "Ceph dashboard"
+ system: true
+ register: rgw_dashboard_user
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: get the rgw access and secret keys
+ set_fact:
+ rgw_access_key: "{{ (rgw_dashboard_user.stdout | from_json)['keys'][0]['access_key'] }}"
+ rgw_secret_key: "{{ (rgw_dashboard_user.stdout | from_json)['keys'][0]['secret_key'] }}"
+
+ - name: set the rgw user
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-user-id {{ dashboard_rgw_api_user_id }}"
+ changed_when: false
+
+ - name: set the rgw access key
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-rgw-api-access-key -i -"
+ args:
+ stdin: "{{ rgw_access_key }}"
+ stdin_add_newline: no
+ changed_when: false
+
+ - name: set the rgw secret key
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-rgw-api-secret-key -i -"
+ args:
+ stdin: "{{ rgw_secret_key }}"
+ stdin_add_newline: no
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: set the rgw host
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-host {{ hostvars[groups[rgw_group_name][0]]['rgw_instances'][0]['radosgw_address'] }}"
+ changed_when: false
+
+ - name: set the rgw port
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-port {{ hostvars[groups[rgw_group_name][0]]['rgw_instances'][0]['radosgw_frontend_port'] }}"
+ changed_when: false
+
+ - name: set the rgw scheme
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-scheme {{ 'https' if radosgw_frontend_ssl_certificate else 'http' }}"
+ changed_when: false
+
+ - name: set the rgw admin resource
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
+ changed_when: false
+ when: dashboard_rgw_api_admin_resource | length > 0
+
+ - name: disable ssl verification for rgw
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False"
+ changed_when: false
+ when:
+ - dashboard_rgw_api_no_ssl_verify | bool
+ - radosgw_frontend_ssl_certificate | length > 0
+
+- name: dashboard iscsi management
+ when: groups.get(iscsi_gw_group_name, []) | length > 0
+ run_once: true
+ block:
+ - name: disable iscsi api ssl verification
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - api_secure | default(false) | bool
+ - generate_crt | default(false) | bool
+
+ - name: add iscsi gateways - ipv4
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
+ args:
+ stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(igw_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+ stdin_add_newline: no
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items: "{{ groups[iscsi_gw_group_name] }}"
+ when: ip_version == 'ipv4'
+
+ - name: add iscsi gateways - ipv6
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
+ args:
+ stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(igw_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+ stdin_add_newline: no
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items: "{{ groups[iscsi_gw_group_name] }}"
+ when: ip_version == 'ipv6'
+
+- name: disable mgr dashboard module (restart)
+ ceph_mgr_module:
+ name: dashboard
+ cluster: "{{ cluster }}"
+ state: disable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+- name: enable mgr dashboard module (restart)
+ ceph_mgr_module:
+ name: dashboard
+ cluster: "{{ cluster }}"
+ state: enable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
--- /dev/null
+---
+- name: set grafana url
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+
+- name: inject grafana dashboard layouts
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ changed_when: false
+ when: containerized_deployment | bool
--- /dev/null
+---
+- name: include configure_dashboard.yml
+ include_tasks: configure_dashboard.yml
+
+- name: print dashboard URL
+ debug:
+ msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
+ run_once: true
--- /dev/null
+# Ansible role: ceph-defaults
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+######################################
+# Releases name to number dictionary #
+######################################
+ceph_release_num:
+ dumpling: 0.67
+ emperor: 0.72
+ firefly: 0.80
+ giant: 0.87
+ hammer: 0.94
+ infernalis: 9
+ jewel: 10
+ kraken: 11
+ luminous: 12
+ mimic: 13
+ nautilus: 14
+ octopus: 15
+ pacific: 16
+ dev: 99
+
+
+# The 'cluster' variable determines the name of the cluster.
+# Changing the default value to something else means that you will
+# need to change all the command line calls as well, for example if
+# your cluster name is 'foo':
+# "ceph health" will become "ceph --cluster foo health"
+#
+# An easier way to handle this is to use the environment variable CEPH_ARGS
+# So run: export CEPH_ARGS="--cluster foo"
+# With that you will be able to run "ceph health" normally
+cluster: ceph
+
+# Inventory host group variables
+mon_group_name: mons
+osd_group_name: osds
+rgw_group_name: rgws
+mds_group_name: mdss
+nfs_group_name: nfss
+rbdmirror_group_name: rbdmirrors
+client_group_name: clients
+iscsi_gw_group_name: iscsigws
+mgr_group_name: mgrs
+rgwloadbalancer_group_name: rgwloadbalancers
+monitoring_group_name: monitoring
+adopt_label_group_names:
+ - "{{ mon_group_name }}"
+ - "{{ osd_group_name }}"
+ - "{{ rgw_group_name }}"
+ - "{{ mds_group_name }}"
+ - "{{ nfs_group_name }}"
+ - "{{ rbdmirror_group_name }}"
+ - "{{ client_group_name }}"
+ - "{{ iscsi_gw_group_name }}"
+ - "{{ mgr_group_name }}"
+ - "{{ rgwloadbalancer_group_name }}"
+ - "{{ monitoring_group_name }}"
+
+# If configure_firewall is true, then ansible will try to configure the
+# appropriate firewall rules so that Ceph daemons can communicate
+# with each other.
+configure_firewall: True
+
+# Open ports on corresponding nodes if firewall is installed on it
+ceph_mon_firewall_zone: public
+ceph_mgr_firewall_zone: public
+ceph_osd_firewall_zone: public
+ceph_rgw_firewall_zone: public
+ceph_mds_firewall_zone: public
+ceph_nfs_firewall_zone: public
+ceph_rbdmirror_firewall_zone: public
+ceph_iscsi_firewall_zone: public
+ceph_dashboard_firewall_zone: public
+ceph_rgwloadbalancer_firewall_zone: public
+
+# cephadm account for remote connections
+cephadm_ssh_user: root
+cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa"
+cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub"
+cephadm_mgmt_network: "{{ public_network }}"
+
+############
+# PACKAGES #
+############
+debian_package_dependencies: []
+
+centos_package_dependencies:
+ - epel-release
+ - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
+
+redhat_package_dependencies: []
+
+suse_package_dependencies: []
+
+# Whether or not to install the ceph-test package.
+ceph_test: false
+
+# Enable the ntp service by default to avoid clock skew on ceph nodes
+# Disable if an appropriate NTP client is already installed and configured
+ntp_service_enabled: true
+
+# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
+ntp_daemon_type: chronyd
+
+# This variable determines if ceph packages can be updated. If False, the
+# package resources will use "state=present". If True, they will use
+# "state=latest".
+upgrade_ceph_packages: False
+
+ceph_use_distro_backports: false # DEBIAN ONLY
+ceph_directories_mode: "0755"
+
+###########
+# INSTALL #
+###########
+# ORIGIN SOURCE
+#
+# Choose between:
+# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'rhcs', 'dev', 'uca', 'custom' or 'obs'
+# - 'distro' means that no separate repo file will be added;
+#   you will get whatever version of Ceph is included in your Linux distro.
+# - 'local' means that the ceph binaries will be copied over from the local machine
+ceph_origin: dummy
+valid_ceph_origins:
+ - repository
+ - distro
+ - local
+
+
+ceph_repository: dummy
+valid_ceph_repository:
+ - community
+ - rhcs
+ - dev
+ - uca
+ - custom
+ - obs
+
+
+# REPOSITORY: COMMUNITY VERSION
+#
+# Enabled when ceph_repository == 'community'
+#
+ceph_mirror: https://download.ceph.com
+ceph_stable_key: https://download.ceph.com/keys/release.asc
+ceph_stable_release: pacific
+ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
+
+nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+nfs_ganesha_stable_branch: V3.5-stable
+nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-3.0/ubuntu
+nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
+nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
+libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-3.0/ubuntu
+
+# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
+# For a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
+# for more info read: https://github.com/ceph/ceph-ansible/issues/305
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+
+
+# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
+#
+# Enabled when ceph_repository == 'rhcs'
+#
+# This version is supported on RHEL 8
+#
+ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(5) }}"
+
+
+# REPOSITORY: UBUNTU CLOUD ARCHIVE
+#
+# Enabled when ceph_repository == 'uca'
+#
+# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
+# usually has newer Ceph releases than the normal distro repository.
+#
+#
+ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+ceph_stable_openstack_release_uca: queens
+ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
+
+# REPOSITORY: openSUSE OBS
+#
+# Enabled when ceph_repository == 'obs'
+#
+# This allows the install of Ceph from the openSUSE OBS repository. The OBS repository
+# usually has newer Ceph releases than the normal distro repository.
+#
+#
+ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
+
+# REPOSITORY: DEV
+#
+# Enabled when ceph_repository == 'dev'
+#
+ceph_dev_branch: main # development branch you would like to use, e.g. main, wip-hack
+ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
+
+nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
+# flavors so far include: ceph_master, ceph_jewel, ceph_kraken, ceph_luminous
+nfs_ganesha_flavor: "ceph_master"
+
+ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways
+
+
+# REPOSITORY: CUSTOM
+#
+# Enabled when ceph_repository == 'custom'
+#
+# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
+# a URL to the .repo file to be installed on the targets. For deb,
+# ceph_custom_repo should be the URL to the repo base.
+#
+#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+ceph_custom_repo: https://server.domain.com/ceph-custom-repo
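+#
+# Hypothetical examples of the two forms described above (illustrative URLs only):
+#   RPM: ceph_custom_repo: https://server.domain.com/ceph-custom-repo/custom.repo
+#   deb: ceph_custom_repo: https://server.domain.com/ceph-custom-repo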
+
+
+# ORIGIN: LOCAL CEPH INSTALLATION
+#
+# Enabled when ceph_origin == 'local'
+#
+# Path to DESTDIR of the ceph install
+#ceph_installation_dir: "/path/to/ceph_installation/"
+# Whether or not to use installer script rundep_installer.sh
+# This script takes in rundep and installs the packages line by line onto the machine
+# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
+# all runtime dependencies installed
+#use_installer: false
+# Root directory for ceph-ansible
+#ansible_dir: "/path/to/ceph-ansible"
+
+
+######################
+# CEPH CONFIGURATION #
+######################
+
+## Ceph options
+#
+# Each cluster requires a unique, consistent filesystem ID. By
+# default, the playbook generates one for you.
+# If you want to customize how the fsid is
+# generated, you may find it useful to disable fsid generation to
+# avoid cluttering up your ansible repo. If you set `generate_fsid` to
+# false, you *must* generate `fsid` in another way.
+# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
+fsid: "{{ cluster_uuid.stdout }}"
+generate_fsid: true
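+# A minimal sketch (hypothetical UUID) of pinning the fsid yourself, e.g. in
+# group_vars/all.yml, when fsid generation is disabled:
+#   generate_fsid: false
+#   fsid: 9f2a0b7e-3c41-4d8a-9b6d-0e5c1f2a3b4c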
+
+ceph_conf_key_directory: /etc/ceph
+
+ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
+
+# Permissions for keyring files in /etc/ceph
+ceph_keyring_permissions: '0600'
+
+cephx: true
+
+## Client options
+#
+rbd_cache: "true"
+rbd_cache_writethrough_until_flush: "true"
+rbd_concurrent_management_ops: 20
+
+rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
+
+# Permissions for the rbd_client_log_path and
+# rbd_client_admin_socket_path. Depending on your use case for Ceph
+# you may want to change these values. The default, which is used if
+# any of the variables are unset or set to a false value (like `null`
+# or `false`), is to automatically determine what is appropriate for
+# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
+# for infernalis and later releases, and root:root and 1777 for
+# pre-infernalis releases.
+#
+# For other use cases, including running Ceph with OpenStack, you'll
+# want to set these differently:
+#
+# For OpenStack on RHEL, you'll want:
+# rbd_client_directory_owner: "qemu"
+# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
+# rbd_client_directory_mode: "0755"
+#
+# For OpenStack on Ubuntu or Debian, set:
+# rbd_client_directory_owner: "libvirt-qemu"
+# rbd_client_directory_group: "kvm"
+# rbd_client_directory_mode: "0755"
+#
+# If you set rbd_client_directory_mode, you must use a string (e.g.,
+# 'rbd_client_directory_mode: "0755"', *not*
+# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
+# must be in octal or symbolic form
+rbd_client_directory_owner: ceph
+rbd_client_directory_group: ceph
+rbd_client_directory_mode: "0770"
+
+rbd_client_log_path: /var/log/ceph
+rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
+rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
+
+## Monitor options
+#
+# You must define either monitor_interface, monitor_address or monitor_address_block.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# E.g. if you want to specify, for each monitor, which address the monitor will bind to, you can set it in your **inventory host file** by using the 'monitor_address' variable.
+# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
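+#
+# A minimal sketch (hypothetical hostnames and addresses) of how this could
+# look in an inventory host file:
+#   [mons]
+#   mon0 monitor_address=192.168.1.10
+#   mon1 monitor_interface=eth1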
+monitor_interface: interface
+monitor_address: x.x.x.x
+monitor_address_block: subnet
+# set to either ipv4 or ipv6, whichever your network is using
+ip_version: ipv4
+
+mon_host_v1:
+ enabled: True
+ suffix: ':6789'
+mon_host_v2:
+ suffix: ':3300'
+
+enable_ceph_volume_debug: False
+
+##########
+# CEPHFS #
+##########
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+# target_size_ratio: 0.2
+cephfs: cephfs # name of the ceph filesystem
+cephfs_data_pool:
+ name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+cephfs_metadata_pool:
+ name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
+cephfs_pools:
+ - "{{ cephfs_data_pool }}"
+ - "{{ cephfs_metadata_pool }}"
+
+## OSD options
+#
+lvmetad_disabled: false
+is_hci: false
+hci_safety_factor: 0.2
+non_hci_safety_factor: 0.7
+safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
+osd_memory_target: 4294967296
+journal_size: 5120 # OSD journal size in MB
+block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
+public_network: 0.0.0.0/0
+cluster_network: "{{ public_network | regex_replace(' ', '') }}"
+osd_mkfs_type: xfs
+osd_mkfs_options_xfs: -f -i size=2048
+osd_mount_options_xfs: noatime,largeio,inode64,swalloc
+osd_objectstore: bluestore
+
+# Any device containing these patterns in their path will be excluded.
+osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
+
+# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
+# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
+# be set to 'true' or 'false' to explicitly override those
+# defaults. Leave it 'null' to use the default for your chosen mkfs
+# type.
+filestore_xattr_use_omap: null
+
+## MDS options
+#
+mds_max_mds: 1
+
+## Rados Gateway options
+#
+radosgw_frontend_type: beast # For additional frontends see: https://docs.ceph.com/en/pacific/radosgw/frontends/
+
+radosgw_civetweb_port: 8080
+radosgw_civetweb_num_threads: 512
+radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }}"
+# For additional civetweb configuration options available such as logging,
+# keepalive, and timeout settings, please see the civetweb docs at
+# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
+
+radosgw_frontend_port: "{{ radosgw_civetweb_port if radosgw_frontend_type == 'civetweb' else '8080' }}"
+# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format.
+radosgw_frontend_ssl_certificate: ""
+radosgw_frontend_ssl_certificate_data: "" # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate
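+#
+# A minimal sketch (hypothetical file names) of assembling such a PEM bundle
+# in the order described above, then pointing the variable at it:
+#   cat server.key server.crt intermediate.crt ca.crt > /etc/ceph/rgw.pem
+#   radosgw_frontend_ssl_certificate: /etc/ceph/rgw.pem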
+radosgw_frontend_options: "{{ radosgw_civetweb_options if radosgw_frontend_type == 'civetweb' else '' }}"
+radosgw_thread_pool_size: 512
+
+
+# You must define either radosgw_interface, radosgw_address or radosgw_address_block.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# E.g. if you want to specify for each radosgw node which address the radosgw will bind to, you can set it in your **inventory host file** by using the 'radosgw_address' variable.
+# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
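+# For illustration only (hypothetical host name and address), an inventory
+# host file entry overriding the bind address per radosgw node could look like:
+# rgw0 radosgw_address=192.168.42.100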
+radosgw_interface: interface
+radosgw_address: x.x.x.x
+radosgw_address_block: subnet
+radosgw_keystone_ssl: false # activate this when using keystone PKI keys
+radosgw_num_instances: 1
+# Rados Gateway options
+email_address: foo@bar.com
+
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+# if you don't want it, keep the option commented out
+#common_single_host_mode: true
+
+## Handlers - restarting daemons after a config change
+# if, for whatever reason, the content of your ceph configuration changes,
+# ceph daemons will be restarted as well. At the moment, we cannot detect
+# which config option changed, so all the daemons will be restarted. Although
+# this restart is serialized per node, a health check is performed in between
+# so we make sure we don't move to the next node until ceph is healthy again.
+# Between the checks (for monitors to be in quorum and for OSD PGs
+# to be clean) we have to wait. These retries and delays are configurable
+# for both monitors and osds.
+#
+# Monitor handler checks
+handler_health_mon_check_retries: 10
+handler_health_mon_check_delay: 20
+#
+# OSD handler checks
+handler_health_osd_check_retries: 40
+handler_health_osd_check_delay: 30
+handler_health_osd_check: true
+#
+# MDS handler checks
+handler_health_mds_check_retries: 5
+handler_health_mds_check_delay: 10
+#
+# RGW handler checks
+handler_health_rgw_check_retries: 5
+handler_health_rgw_check_delay: 10
+
+# NFS handler checks
+handler_health_nfs_check_retries: 5
+handler_health_nfs_check_delay: 10
+
+# RBD MIRROR handler checks
+handler_health_rbd_mirror_check_retries: 5
+handler_health_rbd_mirror_check_delay: 10
+
+# MGR handler checks
+handler_health_mgr_check_retries: 5
+handler_health_mgr_check_delay: 10
+
+## health mon/osds check retries/delay:
+
+health_mon_check_retries: 20
+health_mon_check_delay: 10
+health_osd_check_retries: 20
+health_osd_check_delay: 10
+
+##############
+# RBD-MIRROR #
+##############
+
+ceph_rbd_mirror_pool: "rbd"
+
+###############
+# NFS-GANESHA #
+###############
+#
+# Access type options
+#
+# Enable NFS File access
+# If set to true, then ganesha is set up to export the root of the
+# Ceph filesystem, and ganesha's attribute and directory caching is disabled
+# as much as possible since libcephfs clients also cache the same
+# information.
+#
+# Set this to true to enable File access via NFS. Requires an MDS role.
+nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
+
+
+#############
+# MULTISITE #
+#############
+
+# Changing this value allows multisite code to run
+rgw_multisite: false
+
+# If the desired multisite configuration involves only one realm, one zone group and one zone (per cluster), then the multisite variables can be set here.
+# Please see README-MULTISITE.md for more information.
+#
+# If multiple realms, multiple zonegroups or multiple zones need to be created on a cluster, then
+# the multisite config variables should be edited in their respective zone .yaml file and realm .yaml file.
+# See README-MULTISITE-MULTIREALM.md for more information.
+
+# The following Multi-site related variables should be set by the user.
+#
+# rgw_zone is set to "default" to enable compression for clusters configured without rgw multi-site
+# If multisite is configured, rgw_zone should not be set to "default".
+#
+rgw_zone: default
+
+#rgw_zonemaster: true
+#rgw_zonesecondary: false
+#rgw_zonegroup: solarsystem # should be set by the user
+#rgw_zonegroupmaster: true
+#rgw_zone_user: zone.user
+#rgw_zone_user_display_name: "Zone User"
+#rgw_realm: milkyway # should be set by the user
+#rgw_multisite_proto: "http"
+#system_access_key: 6kWkikvapSnHyE22P7nO # should be re-created by the user
+#system_secret_key: MGecsMrWtKZgngOHZdrd6d3JxGO5CPWgT2lcnpSt # should be re-created by the user
+
+# Multi-site remote pull URL variables
+#rgw_pull_port: "{{ radosgw_frontend_port }}"
+#rgw_pull_proto: "http" # should be the same as rgw_multisite_proto for the master zone cluster
+#rgw_pullhost: localhost # rgw_pullhost only needs to be declared if there is a zone secondary.
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ceph configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+#
+# When configuring RGWs, make sure you use the form [client.rgw.*]
+# instead of [client.radosgw.*].
+# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible.
+#
+# The following sections are supported: [global], [mon], [osd], [mds], [client]
+#
+# Example:
+# ceph_conf_overrides:
+# global:
+# foo: 1234
+# bar: 5678
+# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
+# rgw_zone: zone1
+#
+ceph_conf_overrides: {}
+
+
+#############
+# OS TUNING #
+#############
+
+disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ - { name: vm.zone_reclaim_mode, value: 0 }
+ - { name: vm.swappiness, value: 10 }
+ - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
+
+# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES
+# Set this to a byte value (e.g. 134217728)
+# A value of 0 will leave the package default.
+ceph_tcmalloc_max_total_thread_cache: 134217728
+
+
+##########
+# DOCKER #
+##########
+ceph_docker_image: "ceph/daemon"
+ceph_docker_image_tag: latest-pacific
+ceph_docker_registry: quay.io
+ceph_docker_registry_auth: false
+#ceph_docker_registry_username:
+#ceph_docker_registry_password:
+#ceph_docker_http_proxy:
+#ceph_docker_https_proxy:
+ceph_docker_no_proxy: "localhost,127.0.0.1"
+## Client only docker image - defaults to {{ ceph_docker_image }}
+ceph_client_docker_image: "{{ ceph_docker_image }}"
+ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
+ceph_client_docker_registry: "{{ ceph_docker_registry }}"
+containerized_deployment: False
+container_binary:
+timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
+
+
+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+rolling_update: false
+
+#####################
+# Docker pull retry #
+#####################
+docker_pull_retry: 3
+docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+openstack_config: false
+# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
+# `pg_num` and `pgp_num` keys will be ignored, even if specified.
+# eg:
+# openstack_glance_pool:
+# name: "images"
+# rule_name: "my_replicated_rule"
+# application: "rbd"
+# pg_autoscale_mode: False
+# pg_num: 16
+# pgp_num: 16
+# target_size_ratio: 0.2
+openstack_glance_pool:
+ name: "images"
+ application: "rbd"
+openstack_cinder_pool:
+ name: "volumes"
+ application: "rbd"
+openstack_nova_pool:
+ name: "vms"
+ application: "rbd"
+openstack_cinder_backup_pool:
+ name: "backups"
+ application: "rbd"
+openstack_gnocchi_pool:
+ name: "metrics"
+ application: "rbd"
+openstack_cephfs_data_pool:
+ name: "manila_data"
+ application: "cephfs"
+openstack_cephfs_metadata_pool:
+ name: "manila_metadata"
+ application: "cephfs"
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
+ - "{{ openstack_nova_pool }}"
+ - "{{ openstack_cinder_backup_pool }}"
+ - "{{ openstack_gnocchi_pool }}"
+ - "{{ openstack_cephfs_data_pool }}"
+ - "{{ openstack_cephfs_metadata_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+openstack_keys:
+ - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+ - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+ - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+ - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+ - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+
+
+#############
+# DASHBOARD #
+#############
+dashboard_enabled: True
+# Choose http or https
+# For https, you should set dashboard.crt/key and grafana.crt/key
+# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
+# then we will autogenerate a cert and keyfile
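+# A minimal sketch (hypothetical paths) for an https setup:
+# dashboard_crt: /etc/ceph/ceph-dashboard.crt
+# dashboard_key: /etc/ceph/ceph-dashboard.key
+# grafana_crt: /etc/grafana/ceph-dashboard.crt
+# grafana_key: /etc/grafana/ceph-dashboard.key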
+dashboard_protocol: https
+dashboard_port: 8443
+# Set this variable to the network you want the dashboard to listen on. (Defaults to public_network)
+dashboard_network: "{{ public_network }}"
+dashboard_admin_user: admin
+dashboard_admin_user_ro: false
+# This variable must be set with a strong custom password when dashboard_enabled is True
+#dashboard_admin_password: p@ssw0rd
+# We only need this for SSL (https) connections
+dashboard_crt: ''
+dashboard_key: ''
+dashboard_certificate_cn: ceph-dashboard
+dashboard_tls_external: false
+dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
+dashboard_rgw_api_user_id: ceph-dashboard
+dashboard_rgw_api_admin_resource: ''
+dashboard_rgw_api_no_ssl_verify: False
+dashboard_frontend_vip: ''
+dashboard_disabled_features: []
+prometheus_frontend_vip: ''
+alertmanager_frontend_vip: ''
+node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0"
+node_exporter_port: 9100
+grafana_admin_user: admin
+# This variable must be set with a strong custom password when dashboard_enabled is True
+#grafana_admin_password: admin
+# We only need this for SSL (https) connections
+grafana_crt: ''
+grafana_key: ''
+# When using https, please fill with a hostname for which grafana_crt is valid.
+grafana_server_fqdn: ''
+grafana_container_image: "docker.io/grafana/grafana:6.7.4"
+grafana_container_cpu_period: 100000
+grafana_container_cpu_cores: 2
+# container_memory is in GB
+grafana_container_memory: 4
+grafana_uid: 472
+grafana_datasource: Dashboard
+grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard"
+grafana_dashboard_version: pacific
+grafana_dashboard_files:
+ - ceph-cluster.json
+ - cephfs-overview.json
+ - host-details.json
+ - hosts-overview.json
+ - osd-device-details.json
+ - osds-overview.json
+ - pool-detail.json
+ - pool-overview.json
+ - radosgw-detail.json
+ - radosgw-overview.json
+ - radosgw-sync-overview.json
+ - rbd-details.json
+ - rbd-overview.json
+grafana_plugins:
+ - vonage-status-panel
+ - grafana-piechart-panel
+grafana_allow_embedding: True
+grafana_port: 3000
+grafana_network: "{{ public_network }}"
+grafana_conf_overrides: {}
+prometheus_container_image: "docker.io/prom/prometheus:v2.7.2"
+prometheus_container_cpu_period: 100000
+prometheus_container_cpu_cores: 2
+# container_memory is in GB
+prometheus_container_memory: 4
+prometheus_data_dir: /var/lib/prometheus
+prometheus_conf_dir: /etc/prometheus
+prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image
+prometheus_port: 9092
+prometheus_conf_overrides: {}
+# Uncomment this variable if you need to customize the retention period for prometheus storage.
+# Set it to '30d' if you want to retain 30 days of data.
+#prometheus_storage_tsdb_retention_time: 15d
+alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
+alertmanager_container_cpu_period: 100000
+alertmanager_container_cpu_cores: 2
+# container_memory is in GB
+alertmanager_container_memory: 4
+alertmanager_data_dir: /var/lib/alertmanager
+alertmanager_conf_dir: /etc/alertmanager
+alertmanager_port: 9093
+alertmanager_cluster_port: 9094
+alertmanager_conf_overrides: {}
+alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"
+# igw
+#
+# The `igw_network` variable allows dashboard deployment when the iSCSI node does not reside in the same subnet as the one defined in `public_network`.
+# For example:
+# If the ceph public network is 2a00:8a60:1:c301::/64 and the iSCSI Gateway resides
+# in a dedicated gateway network (2a00:8a60:1:c300::/64) (with routing between those networks),
+# then "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}" will be empty.
+# As a consequence, this prevents deploying the dashboard when the iSCSI node resides in a subnet different from `public_network`.
+# Using `igw_network` makes it possible: set it to the subnet used by your iSCSI node.
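+# Example, using the dedicated gateway subnet from the scenario above:
+# igw_network: 2a00:8a60:1:c300::/64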
+igw_network: "{{ public_network }}"
+
+
+#################################
+# DEPRECATED iSCSI TARGET SETUP #
+#################################
+
+# WARNING #
+
+# The following values are deprecated. To set up targets, gateways, LUNs, and
+# clients you should use gwcli or the dashboard. If the following values are set,
+# the old ceph-iscsi-config/ceph-iscsi-cli packages will be used.
+
+# Specify the iqn for ALL gateways. This iqn is shared across the gateways, so an iscsi
+# client sees the gateway group as a single storage subsystem.
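+# For illustration only (hypothetical IQN):
+# gateway_iqn: "iqn.2003-01.com.redhat.iscsi-gw:ceph-igw"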
+gateway_iqn: ""
+
+# gateway_ip_list provides a list of the IP addresses - one per gateway - that will be used
+# as an iscsi target portal ip. The list must be comma separated - and the order determines
+# the sequence of TPG's within the iscsi target across each gateway. Once set, additional
+# gateways can be added, but the order must *not* be changed.
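+# For illustration only (hypothetical addresses, one per gateway, comma separated):
+# gateway_ip_list: 192.168.122.101,192.168.122.102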
+gateway_ip_list: 0.0.0.0
+
+# rbd_devices defines the images that should be created and exported from the iscsi gateways.
+# If the rbd does not exist, it will be created for you. In addition you may increase the
+# size of rbd's by changing the size parameter and rerunning the playbook. A size value lower
+# than the current size of the rbd is ignored.
+#
+# the 'host' parameter defines which of the gateway nodes should handle the physical
+# allocation/expansion or removal of the rbd
+# to remove an image, simply use a state of 'absent'. This will first check the rbd is not allocated
+# to any client, then remove it from LIO and finally delete the rbd image
+#
+# NB. this variable definition can be commented out to bypass LUN management
+#
+# Example:
+#
+#rbd_devices:
+# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
+rbd_devices: {}
+
+# client_connections defines the client ACL's to restrict client access to specific LUNs
+# The settings are as follows:
+# - image_list is a comma separated list of rbd images of the form <pool name>.<rbd_image_name>
+# - chap supplies the user and password the client will use for authentication of the
+# form <user>/<password>
+# - status shows the intended state of this client definition - 'present' or 'absent'
+#
+# NB. this definition can be commented out to skip client (nodeACL) management
+#
+# Example:
+#
+#client_connections:
+# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
+# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
+
+client_connections: {}
+
+no_log_on_ceph_key_tasks: True
+
+###############
+# DEPRECATION #
+###############
+
+
+
+######################################################
+# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
+# *DO NOT* MODIFY THEM #
+######################################################
+
+container_exec_cmd:
+docker: false
+ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Handles ceph-ansible default vars for all roles
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: Ubuntu
+ versions:
+ - xenial
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
\ No newline at end of file
--- /dev/null
+---
+ceph_osd_pool_default_crush_rule: -1
+ceph_osd_pool_default_crush_rule_name: "replicated_rule"
\ No newline at end of file
--- /dev/null
+# Ansible role: ceph-facts
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Guillaume Abrioux
+ description: Set some facts for ceph to be deployed
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: Ubuntu
+ versions:
+ - xenial
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: check if podman binary is present
+ stat:
+ path: /usr/bin/podman
+ register: podman_binary
+
+- name: set_fact container_binary
+ set_fact:
+ container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
+ when: not docker2podman | default(false) | bool
\ No newline at end of file
--- /dev/null
+---
+- name: convert grafana-server group name if exist
+ add_host:
+ name: "{{ item }}"
+ groups: "{{ monitoring_group_name }}"
+ ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}"
+ ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}"
+ with_items: "{{ groups.get((grafana_server_group_name|default('grafana-server')), []) }}"
+ run_once: True
--- /dev/null
+---
+- name: resolve device link(s)
+ command: readlink -f {{ item }}
+ changed_when: false
+ check_mode: no
+ with_items: "{{ devices }}"
+ register: devices_prepare_canonicalize
+ when:
+ - devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: set_fact build devices from resolved symlinks
+ set_fact:
+ devices: "{{ devices | default([]) + [ item.stdout ] }}"
+ with_items: "{{ devices_prepare_canonicalize.results }}"
+ when:
+ - devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: set_fact build final devices list
+ set_fact:
+ devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
+ when:
+ - devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: resolve dedicated_device link(s)
+ command: readlink -f {{ item }}
+ changed_when: false
+ check_mode: no
+ with_items: "{{ dedicated_devices }}"
+ register: dedicated_devices_prepare_canonicalize
+ when:
+ - dedicated_devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: set_fact build dedicated_devices from resolved symlinks
+ set_fact:
+ dedicated_devices: "{{ dedicated_devices | default([]) + [ item.stdout ] }}"
+ with_items: "{{ dedicated_devices_prepare_canonicalize.results }}"
+ when:
+ - dedicated_devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: set_fact build final dedicated_devices list
+ set_fact:
+ dedicated_devices: "{{ dedicated_devices | reject('search','/dev/disk') | list | unique }}"
+ when:
+ - dedicated_devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: resolve bluestore_wal_device link(s)
+ command: readlink -f {{ item }}
+ changed_when: false
+ check_mode: no
+ with_items: "{{ bluestore_wal_devices }}"
+ register: bluestore_wal_devices_prepare_canonicalize
+ when:
+ - bluestore_wal_devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: set_fact build bluestore_wal_devices from resolved symlinks
+ set_fact:
+ bluestore_wal_devices: "{{ bluestore_wal_devices | default([]) + [ item.stdout ] }}"
+ with_items: "{{ bluestore_wal_devices_prepare_canonicalize.results }}"
+ when:
+ - bluestore_wal_devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: set_fact build final bluestore_wal_devices list
+ set_fact:
+ bluestore_wal_devices: "{{ bluestore_wal_devices | reject('search','/dev/disk') | list | unique }}"
+ when:
+ - bluestore_wal_devices is defined
+ - not osd_auto_discovery | default(False) | bool
+
+- name: set_fact devices generate device list when osd_auto_discovery
+ vars:
+ device: "{{ item.key | regex_replace('^', '/dev/') }}"
+ set_fact:
+ devices: "{{ devices | default([]) | union([device]) }}"
+ with_dict: "{{ ansible_facts['devices'] }}"
+ when:
+ - osd_auto_discovery | default(False) | bool
+ - ansible_facts['devices'] is defined
+ - item.value.removable == "0"
+ - item.value.sectors != "0"
+ - item.value.partitions|count == 0
+ - item.value.holders|count == 0
+ - ansible_facts['mounts'] | selectattr('device', 'equalto', device) | list | length == 0
+ - item.key is not match osd_auto_discovery_exclude
--- /dev/null
+---
+- name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+
+- name: set_fact is_atomic
+ set_fact:
+ is_atomic: "{{ stat_ostree.stat.exists }}"
+
+- name: import_tasks container_binary.yml
+ import_tasks: container_binary.yml
+
+- name: set_fact ceph_cmd
+ set_fact:
+ ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
+
+# In case ansible_python_interpreter is set by the user,
+# ansible will not discover python and discovered_interpreter_python
+# will not be set
+- name: set_fact discovered_interpreter_python
+ set_fact:
+ discovered_interpreter_python: "{{ ansible_python_interpreter }}"
+ when: ansible_python_interpreter is defined
+
+# If ansible_python_interpreter is not defined, the previous task does not run,
+# which can leave the discovered_interpreter_python fact unset. That fact is used
+# later in this playbook and elsewhere, so set it from ansible_facts instead.
+- name: set_fact discovered_interpreter_python if not previously set
+ set_fact:
+ discovered_interpreter_python: "{{ ansible_facts['discovered_interpreter_python'] }}"
+ when:
+ - discovered_interpreter_python is not defined
+ - ansible_facts['discovered_interpreter_python'] is defined
+
+# Set ceph_release to ceph_stable by default
+- name: set_fact ceph_release ceph_stable_release
+ set_fact:
+ ceph_release: "{{ ceph_stable_release }}"
+
+- name: set_fact monitor_name ansible_facts['hostname']
+ set_fact:
+ monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ run_once: true
+ when: groups.get(mon_group_name, []) | length > 0
+
+- name: find a running monitor
+ when: groups.get(mon_group_name, []) | length > 0
+ block:
+ - name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}"
+ when:
+ - containerized_deployment | bool
+
+ - name: find a running mon container
+ command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ register: find_running_mon_container
+ failed_when: false
+ run_once: true
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - containerized_deployment | bool
+
+ - name: check for a ceph mon socket
+ shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: mon_socket_stat
+ run_once: true
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - not containerized_deployment | bool
+
+ - name: check if the ceph mon socket is in-use
+ command: grep -q {{ item.stdout }} /proc/net/unix
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: mon_socket
+ run_once: true
+ delegate_to: "{{ hostvars[item.item]['inventory_hostname'] }}"
+ with_items: "{{ mon_socket_stat.results }}"
+ when:
+ - not containerized_deployment | bool
+ - item.rc == 0
+
+ - name: set_fact running_mon - non_container
+ set_fact:
+ running_mon: "{{ hostvars[item.item.item]['inventory_hostname'] }}"
+ with_items: "{{ mon_socket.results }}"
+ run_once: true
+ when:
+ - not containerized_deployment | bool
+ - item.rc is defined
+ - item.rc == 0
+
+ - name: set_fact running_mon - container
+ set_fact:
+ running_mon: "{{ item.item }}"
+ run_once: true
+ with_items: "{{ find_running_mon_container.results }}"
+ when:
+ - containerized_deployment | bool
+ - item.stdout_lines | default([]) | length > 0
+
+ - name: set_fact _container_exec_cmd
+ set_fact:
+ _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}"
+ when:
+ - containerized_deployment | bool
+
+ # this task shouldn't run in a rolling_update situation
+ # because it blindly picks a mon, which may be down because
+ # of the rolling update
+ - name: get current fsid if cluster is already running
+ command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fsid"
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: current_fsid
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}"
+ when:
+ - not rolling_update | bool
+
+# set this as a default when performing a rolling_update
+# so the rest of the tasks here will succeed
+- name: set_fact current_fsid rc 1
+ set_fact:
+ current_fsid:
+ rc: 1
+ when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0
+
+- name: get current fsid
+ command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
+ register: rolling_update_fsid
+ delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
+ until: rolling_update_fsid is succeeded
+ when:
+ - rolling_update | bool
+ - groups.get(mon_group_name, []) | length > 0
+
+- name: set_fact fsid
+ set_fact:
+ fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
+ when:
+ - rolling_update | bool
+ - groups.get(mon_group_name, []) | length > 0
+
+- name: set_fact fsid from current_fsid
+ set_fact:
+ fsid: "{{ current_fsid.stdout }}"
+ run_once: true
+ when: current_fsid.rc == 0
+
+- name: fsid related tasks
+ when:
+ - generate_fsid | bool
+ - current_fsid.rc != 0
+ - not rolling_update | bool
+ block:
+ - name: generate cluster fsid
+ command: "{{ hostvars[groups[mon_group_name][0]]['discovered_interpreter_python'] }} -c 'import uuid; print(str(uuid.uuid4()))'"
+ register: cluster_uuid
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ - name: set_fact fsid
+ set_fact:
+ fsid: "{{ cluster_uuid.stdout }}"
+
+- name: import_tasks devices.yml
+ import_tasks: devices.yml
+ when: inventory_hostname in groups.get(osd_group_name, [])
+
+- name: backward compatibility tasks related
+ when:
+ - (inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []))
+ - groups.get(mon_group_name, []) | length > 0
+ - handler_mgr_status | default(False)
+ block:
+ - name: get ceph current status
+ command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: ceph_current_status
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}"
+
+ - name: set_fact ceph_current_status
+ set_fact:
+ ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
+ run_once: true
+ when: ceph_current_status.rc == 0
+
+ - name: set_fact rgw_hostname
+ set_fact:
+ rgw_hostname: "{% set _value = ansible_facts['hostname'] -%}
+ {% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
+ {% if key == ansible_facts['fqdn'] -%}
+ {% set _value = key -%}
+ {% endif -%}
+ {% endfor -%}
+ {{ _value }}"
+ when:
+ - ceph_current_status['services'] is defined
+ - ceph_current_status['services']['rgw'] is defined
+
+- name: check if the ceph conf exists
+ stat:
+ path: '/etc/ceph/{{ cluster }}.conf'
+ register: ceph_conf
+
+- name: set default osd_pool_default_crush_rule fact
+ set_fact:
+ osd_pool_default_crush_rule: "{{ ceph_osd_pool_default_crush_rule }}"
+
+- name: get default crush rule value from ceph configuration
+ block:
+ - &read-osd-pool-default-crush-rule
+ name: read osd pool default crush rule
+ command: grep 'osd pool default crush rule' /etc/ceph/{{ cluster }}.conf
+ register: crush_rule_variable
+ changed_when: false
+ check_mode: no
+ failed_when: crush_rule_variable.rc not in (0, 1)
+ - &set-osd-pool-default-crush-rule-fact
+ name: set osd_pool_default_crush_rule fact
+ set_fact:
+ osd_pool_default_crush_rule: "{{ crush_rule_variable.stdout.split(' = ')[1] }}"
+ when: crush_rule_variable.rc == 0
+ when: ceph_conf.stat.exists | bool
+
+- name: get default crush rule value from running monitor ceph configuration
+ block:
+ - <<: *read-osd-pool-default-crush-rule
+ delegate_to: "{{ running_mon }}"
+ - *set-osd-pool-default-crush-rule-fact
+ when:
+ - running_mon is defined
+ - not ceph_conf.stat.exists | bool
+
+- name: import_tasks set_monitor_address.yml
+ import_tasks: set_monitor_address.yml
+ when: groups.get(mon_group_name, []) | length > 0
+
+- name: import_tasks set_radosgw_address.yml
+ include_tasks: set_radosgw_address.yml
+ when: inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli
+ set_fact:
+ use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"
+ when: iscsi_gw_group_name in group_names
+
+- name: set_fact ceph_run_cmd
+ set_fact:
+ ceph_run_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ run_once: True
+ with_items:
+ - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}"
+ - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}"
+ - "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}"
+
+- name: set_fact ceph_admin_command
+ set_fact:
+ ceph_admin_command: "{{ hostvars[item]['ceph_run_cmd'] }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring"
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ run_once: True
+ with_items:
+ - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}"
+ - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}"
+ - "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}"
--- /dev/null
+---
+- name: get current default crush rule details
+ ceph_crush_rule:
+ name: null
+ cluster: "{{ cluster }}"
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: default_crush_rule_details
+ delegate_to: "{{ delegated_node | default(groups[mon_group_name][0]) }}"
+ run_once: true
+
+- name: get current default crush rule name
+ set_fact:
+ ceph_osd_pool_default_crush_rule_name: "{{ item.rule_name }}"
+ with_items: "{{ default_crush_rule_details.stdout | default('{}', True) | from_json }}"
+ run_once: True
+ when: item.rule_id | int == osd_pool_default_crush_rule | int
--- /dev/null
+- name: set grafana_server_addr fact - ipv4
+ set_fact:
+ grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first }}"
+ when:
+ - groups.get(monitoring_group_name, []) | length > 0
+ - ip_version == 'ipv4'
+ - dashboard_enabled | bool
+ - inventory_hostname in groups[monitoring_group_name]
+
+- name: set grafana_server_addr fact - ipv6
+ set_fact:
+ grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ipwrap }}"
+ when:
+ - groups.get(monitoring_group_name, []) | length > 0
+ - ip_version == 'ipv6'
+ - dashboard_enabled | bool
+ - inventory_hostname in groups[monitoring_group_name]
+
+- name: set grafana_server_addrs fact - ipv4
+ set_fact:
+ grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first]) | unique }}"
+ with_items: "{{ groups.get(monitoring_group_name, []) }}"
+ when:
+ - groups.get(monitoring_group_name, []) | length > 0
+ - ip_version == 'ipv4'
+ - dashboard_enabled | bool
+
+- name: set grafana_server_addrs fact - ipv6
+ set_fact:
+ grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ipwrap]) | unique }}"
+ with_items: "{{ groups.get(monitoring_group_name, []) }}"
+ when:
+ - groups.get(monitoring_group_name, []) | length > 0
+ - ip_version == 'ipv6'
+ - dashboard_enabled | bool
--- /dev/null
+---
+- name: include_tasks convert_grafana_server_group_name.yml
+ include_tasks: convert_grafana_server_group_name.yml
+ when: groups.get((grafana_server_group_name|default('grafana-server')), []) | length > 0
+
+- name: include facts.yml
+ include_tasks: facts.yml
--- /dev/null
+---
+- name: set_fact _monitor_addresses to monitor_address_block ipv4
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - hostvars[item]['monitor_address_block'] is defined
+ - hostvars[item]['monitor_address_block'] != 'subnet'
+ - ip_version == 'ipv4'
+
+- name: set_fact _monitor_addresses to monitor_address_block ipv6
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - hostvars[item]['monitor_address_block'] is defined
+ - hostvars[item]['monitor_address_block'] != 'subnet'
+ - ip_version == 'ipv6'
+
+- name: set_fact _monitor_addresses to monitor_address
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - hostvars[item]['monitor_address'] is defined
+ - hostvars[item]['monitor_address'] != 'x.x.x.x'
+
+- name: set_fact _monitor_addresses to monitor_interface - ipv4
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - ip_version == 'ipv4'
+ - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
+ - hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x'
+ - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _monitor_addresses to monitor_interface - ipv6
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - ip_version == 'ipv6'
+ - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
+ - hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x'
+ - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _current_monitor_address
+ set_fact:
+ _current_monitor_address: "{{ item.addr }}"
+ with_items: "{{ _monitor_addresses }}"
+ when:
+ - (inventory_hostname == item.name and not rolling_update | default(False) | bool)
+ or (rolling_update | default(False) | bool and item.name == groups.get(mon_group_name, [])[0])
\ No newline at end of file
--- /dev/null
+---
+- name: dashboard related tasks
+ when: ceph_dashboard_call_item is defined
+ block:
+ - name: set current radosgw_address_block, radosgw_address, radosgw_interface from node "{{ ceph_dashboard_call_item }}"
+ set_fact:
+ radosgw_address_block: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address_block'] | default(radosgw_address_block) }}"
+ radosgw_address: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address'] | default(radosgw_address) }}"
+ radosgw_interface: "{{ hostvars[ceph_dashboard_call_item]['radosgw_interface'] | default(radosgw_interface) }}"
+
+- name: set_fact _radosgw_address to radosgw_address_block ipv4
+ set_fact:
+ _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
+ when:
+ - radosgw_address_block is defined
+ - radosgw_address_block != 'subnet'
+ - ip_version == 'ipv4'
+
+- name: set_fact _radosgw_address to radosgw_address_block ipv6
+ set_fact:
+ _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
+ when:
+ - radosgw_address_block is defined
+ - radosgw_address_block != 'subnet'
+ - ip_version == 'ipv6'
+
+- name: set_fact _radosgw_address to radosgw_address
+ set_fact:
+ _radosgw_address: "{{ radosgw_address | ipwrap }}"
+ when:
+ - radosgw_address is defined
+ - radosgw_address != 'x.x.x.x'
+
+- name: tasks for radosgw interface
+ when:
+ - radosgw_address_block == 'subnet'
+ - radosgw_address == 'x.x.x.x'
+ - radosgw_interface != 'interface'
+ block:
+ - name: set_fact _interface
+ set_fact:
+ _interface: "{{ (hostvars[item]['radosgw_interface'] | replace('-', '_')) }}"
+ loop: "{{ groups.get(rgw_group_name, []) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ run_once: true
+
+ - name: set_fact _radosgw_address to radosgw_interface - ipv4
+ set_fact:
+ _radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version]['address'] }}"
+ loop: "{{ groups.get(rgw_group_name, []) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ when: ip_version == 'ipv4'
+
+
+ - name: set_fact _radosgw_address to radosgw_interface - ipv6
+ set_fact:
+ _radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version][0]['address'] | ipwrap }}"
+ loop: "{{ groups.get(rgw_group_name, []) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ when: ip_version == 'ipv6'
+
+- name: set_fact rgw_instances without rgw multisite
+ set_fact:
+ rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port|int + item|int }]) }}"
+ with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
+ delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}"
+ delegate_facts: "{{ true if ceph_dashboard_call_item is defined else false }}"
+ when:
+ - ceph_dashboard_call_item is defined or
+ inventory_hostname in groups.get(rgw_group_name, [])
+ - not rgw_multisite | bool
+
+- name: set_fact is_rgw_instances_defined
+ set_fact:
+ is_rgw_instances_defined: "{{ hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['rgw_instances'] is defined }}"
+ delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}"
+ delegate_facts: "{{ true if ceph_dashboard_call_item is defined else false }}"
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, []) or
+ ceph_dashboard_call_item is defined
+ - rgw_multisite | bool
+
+- name: set_fact rgw_instances with rgw multisite
+ set_fact:
+ rgw_instances: "{{ rgw_instances|default([]) | union([{ 'instance_name': 'rgw' + item | string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port | int + item|int, 'rgw_realm': rgw_realm | string, 'rgw_zonegroup': rgw_zonegroup | string, 'rgw_zone': rgw_zone | string, 'system_access_key': system_access_key, 'system_secret_key': system_secret_key, 'rgw_zone_user': rgw_zone_user, 'rgw_zone_user_display_name': rgw_zone_user_display_name, 'endpoint': (rgw_pull_proto + '://' + rgw_pullhost + ':' + rgw_pull_port | string) if not rgw_zonemaster | bool and rgw_zonesecondary | bool else omit }]) }}"
+ with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
+ delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}"
+ delegate_facts: "{{ true if ceph_dashboard_call_item is defined else false }}"
+ when:
+ - ceph_dashboard_call_item is defined or
+ inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_multisite | bool
+ - not is_rgw_instances_defined | default(False) | bool
+
+- name: set_fact rgw_instances_host
+ set_fact:
+ rgw_instances_host: '{{ rgw_instances_host | default([]) | union([item | combine({"host": inventory_hostname})]) }}'
+ with_items: '{{ rgw_instances }}'
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_multisite | bool
+
+- name: set_fact rgw_instances_all
+ set_fact:
+ rgw_instances_all: '{{ rgw_instances_all | default([]) | union(hostvars[item]["rgw_instances_host"]) }}'
+ with_items: "{{ groups.get(rgw_group_name, []) }}"
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - hostvars[item]["rgw_instances_host"] is defined
+ - hostvars[item]["rgw_multisite"] | default(False) | bool
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-fetch
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when there is no variable in it.
+dummy:
+
+fetch_directory: fetch/
+
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Andrew Schoen
+ description: Fetches ceph keys from monitors.
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: lookup keys in /etc/ceph
+ shell: ls -1 /etc/ceph/*.keyring
+ changed_when: false
+ register: ceph_keys
+
+- name: create a local fetch directory if it does not exist
+ file:
+ path: "{{ fetch_directory }}"
+ state: directory
+ delegate_to: localhost
+ become: false
+
+- name: "copy ceph user and bootstrap keys to the ansible server in {{ fetch_directory }}/{{ fsid }}/"
+ fetch:
+ src: "{{ item }}"
+ dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
+ flat: yes
+ fail_on_missing: false
+ run_once: true
+ with_items:
+ - "{{ ceph_keys.stdout_lines }}"
+ - "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring"
+ - "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
+ - "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
+ - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
+ - "/var/lib/ceph/bootstrap-mgr/{{ cluster }}.keyring"
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Boris Ranto
+ description: Configures Grafana for Ceph Dashboard
+ license: Apache
+ min_ansible_version: 2.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: install ceph-grafana-dashboards package on RedHat or SUSE
+ package:
+ name: ceph-grafana-dashboards
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ when:
+ - not containerized_deployment | bool
+ - ansible_facts['os_family'] in ['RedHat', 'Suse']
+ tags: package-install
+
+- name: make sure grafana is down
+ service:
+ name: grafana-server
+ state: stopped
+
+- name: wait for grafana to be stopped
+ wait_for:
+ host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}'
+ port: '{{ grafana_port }}'
+ state: stopped
+
+- name: make sure grafana configuration directories exist
+ file:
+ path: "{{ item }}"
+ state: directory
+ recurse: yes
+ owner: "{{ grafana_uid }}"
+ group: "{{ grafana_uid }}"
+ with_items:
+ - "/etc/grafana/dashboards/ceph-dashboard"
+ - "/etc/grafana/provisioning/datasources"
+ - "/etc/grafana/provisioning/dashboards"
+ - "/etc/grafana/provisioning/notifiers"
+
+- name: download ceph grafana dashboards
+ get_url:
+ url: "https://raw.githubusercontent.com/ceph/ceph/{{ grafana_dashboard_version }}/monitoring/ceph-mixin/dashboards_out/{{ item }}"
+ dest: "/etc/grafana/dashboards/ceph-dashboard/{{ item }}"
+ with_items: "{{ grafana_dashboard_files }}"
+ when:
+ - not containerized_deployment | bool
+ - not ansible_facts['os_family'] in ['RedHat', 'Suse']
+
+- name: write grafana.ini
+ config_template:
+ src: grafana.ini.j2
+ dest: /etc/grafana/grafana.ini
+ owner: "{{ grafana_uid }}"
+ group: "{{ grafana_uid }}"
+ mode: 0640
+ config_type: ini
+ config_overrides: "{{ grafana_conf_overrides }}"
+
+- name: write datasources provisioning config file
+ template:
+ src: datasources-ceph-dashboard.yml.j2
+ dest: /etc/grafana/provisioning/datasources/ceph-dashboard.yml
+ owner: "{{ grafana_uid }}"
+ group: "{{ grafana_uid }}"
+ mode: 0640
+
+- name: write dashboards provisioning config file
+ template:
+ src: dashboards-ceph-dashboard.yml.j2
+ dest: /etc/grafana/provisioning/dashboards/ceph-dashboard.yml
+ owner: "{{ grafana_uid }}"
+ group: "{{ grafana_uid }}"
+ mode: 0640
+ when: not containerized_deployment | bool
+
+- name: copy grafana SSL certificate file
+ copy:
+ src: "{{ grafana_crt }}"
+ dest: "/etc/grafana/ceph-dashboard.crt"
+ owner: "{{ grafana_uid }}"
+ group: "{{ grafana_uid }}"
+ mode: 0640
+ remote_src: "{{ dashboard_tls_external | bool }}"
+ when:
+ - grafana_crt | length > 0
+ - dashboard_protocol == "https"
+
+- name: copy grafana SSL certificate key
+ copy:
+ src: "{{ grafana_key }}"
+ dest: "/etc/grafana/ceph-dashboard.key"
+ owner: "{{ grafana_uid }}"
+ group: "{{ grafana_uid }}"
+ mode: 0440
+ remote_src: "{{ dashboard_tls_external | bool }}"
+ when:
+ - grafana_key | length > 0
+ - dashboard_protocol == "https"
+
+- name: generate a self-signed OpenSSL certificate for dashboard
+ shell: |
+ test -f /etc/grafana/ceph-dashboard.key -a -f /etc/grafana/ceph-dashboard.crt || \
+ (openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-grafana' -days 3650 -keyout /etc/grafana/ceph-dashboard.key -out /etc/grafana/ceph-dashboard.crt -extensions v3_ca && \
+ chown {{ grafana_uid }}:{{ grafana_uid }} /etc/grafana/ceph-dashboard.key /etc/grafana/ceph-dashboard.crt)
+ when:
+ - dashboard_protocol == "https"
+ - grafana_key | length == 0 or grafana_crt | length == 0
+
+- name: enable and start grafana
+ service:
+ name: grafana-server
+ state: restarted
+ enabled: true
+
+- name: wait for grafana to start
+ wait_for:
+ host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}'
+ port: '{{ grafana_port }}'
--- /dev/null
+---
+- name: include setup_container.yml
+ include_tasks: setup_container.yml
+
+- name: include configure_grafana.yml
+ include_tasks: configure_grafana.yml
--- /dev/null
+---
+- name: create /etc/grafana and /var/lib/grafana
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ grafana_uid }}"
+ group: "{{ grafana_uid }}"
+ recurse: true
+ with_items:
+ - /etc/grafana
+ - /var/lib/grafana
+
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: start the grafana-server service
+ systemd:
+ name: grafana-server
+ state: started
+ enabled: yes
+ daemon_reload: yes
+ failed_when: false
--- /dev/null
+---
+- name: ship systemd service
+ template:
+ src: grafana-server.service.j2
+ dest: "/etc/systemd/system/grafana-server.service"
+ owner: root
+ group: root
+ mode: 0644
--- /dev/null
+apiVersion: 1
+
+providers:
+- name: 'Ceph Dashboard'
+ orgId: 1
+ folder: 'ceph-dashboard'
+ type: file
+ disableDeletion: false
+ updateIntervalSeconds: 3
+ editable: false
+ options:
+ path: '{{ grafana_dashboards_path }}'
--- /dev/null
+apiVersion: 1
+
+# list of datasources that should be deleted from the database
+deleteDatasources:
+ - name: '{{ grafana_datasource }}'
+ orgId: 1
+
+# list of datasources to insert/update depending
+# what's available in the database
+datasources:
+ # <string, required> name of the datasource. Required
+- name: '{{ grafana_datasource }}'
+ # <string, required> datasource type. Required
+ type: 'prometheus'
+ # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+ access: 'proxy'
+ # <int> org id. will default to orgId 1 if not specified
+ orgId: 1
+ # <string> url
+ url: 'http://{{ grafana_server_addr }}:{{ prometheus_port }}'
+ # <bool> enable/disable basic auth
+ basicAuth: false
+ # <bool> mark as default datasource. Max one per org
+ isDefault: true
+ # <bool> allow users to edit datasources from the UI.
+ editable: false
--- /dev/null
+# This file is managed by ansible, don't make changes here - they will be
+# overwritten.
+[Unit]
+Description=grafana-server
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage grafana-server
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop grafana-server
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm grafana-server
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name=grafana-server \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ -v /etc/grafana:/etc/grafana:Z \
+ -v /var/lib/grafana:/var/lib/grafana:Z \
+ --net=host \
+ --cpu-period={{ grafana_container_cpu_period }} \
+ --cpu-quota={{ grafana_container_cpu_period * grafana_container_cpu_cores }} \
+ --memory={{ grafana_container_memory }}GB \
+ --memory-swap={{ grafana_container_memory * 2 }}GB \
+ -e GF_INSTALL_PLUGINS={{ grafana_plugins|join(',') }} \
+{% if ceph_docker_http_proxy is defined %}
+ -e http_proxy={{ ceph_docker_http_proxy }} \
+{% endif %}
+{% if ceph_docker_https_proxy is defined %}
+ -e https_proxy={{ ceph_docker_https_proxy }} \
+{% endif %}
+{% if ceph_docker_no_proxy is defined %}
+ -e no_proxy={{ ceph_docker_no_proxy }} \
+{% endif %}
+ {{ grafana_container_image }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStop=-/usr/bin/{{ container_binary }} stop grafana-server
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+# [server]
+# root_url = %(protocol)s://%(domain)s:%(http_port)s/api/grafana/proxy
+
+[users]
+default_theme = light
+
+#################################### Anonymous Auth ##########################
+[auth.anonymous]
+# enable anonymous access
+enabled = true
+
+# specify organization name that should be used for unauthenticated users
+org_name = Main Org.
+
+# specify role for unauthenticated users
+org_role = Viewer
+
+[server]
+cert_file = /etc/grafana/ceph-dashboard.crt
+cert_key = /etc/grafana/ceph-dashboard.key
+domain = {{ ansible_facts['fqdn'] }}
+protocol = {{ dashboard_protocol }}
+http_port = {{ grafana_port }}
+http_addr = {{ grafana_server_addr }}
+
+[security]
+admin_user = {{ grafana_admin_user }}
+admin_password = {{ grafana_admin_password }}
+allow_embedding = {{ grafana_allow_embedding }}
+{% if dashboard_protocol == 'https' %}
+cookie_secure = true
+
+[session]
+cookie_secure = true
+{% endif %}
\ No newline at end of file
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Guillaume Abrioux]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-handler
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
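+
+The handlers in this role are not run directly; other roles trigger them with
+`notify`. A minimal, hypothetical sketch (the task below is an assumption,
+only the handler topic name comes from this role):
+
+```yaml
+# Hypothetical task: a changed ceph.conf notifies the monitor restart handler,
+# which is additionally gated per host by handler_mon_status.
+- name: template ceph.conf
+  template:
+    src: ceph.conf.j2
+    dest: /etc/ceph/ceph.conf
+  notify: restart ceph mons
+```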
--- /dev/null
+---
+- name: handlers
+ when:
+ - not rolling_update | bool
+ - not docker2podman | default(False) | bool
+ block:
+ - name: make tempdir for scripts
+ tempfile:
+ state: directory
+ prefix: ceph_ansible
+ listen:
+ - "restart ceph mons"
+ - "restart ceph osds"
+ - "restart ceph mdss"
+ - "restart ceph rgws"
+ - "restart ceph nfss"
+ - "restart ceph rbdmirrors"
+ - "restart ceph mgrs"
+ register: tmpdirpath
+ when: tmpdirpath is not defined or tmpdirpath.path is not defined or tmpdirpath.state=="absent"
+
+ - name: mons handler
+ include_tasks: handler_mons.yml
+ when: mon_group_name in group_names
+ listen: "restart ceph mons"
+
+ - name: osds handler
+ include_tasks: handler_osds.yml
+ when: osd_group_name in group_names
+ listen: "restart ceph osds"
+
+ - name: mdss handler
+ include_tasks: handler_mdss.yml
+ when: mds_group_name in group_names
+ listen: "restart ceph mdss"
+
+ - name: rgws handler
+ include_tasks: handler_rgws.yml
+ when: rgw_group_name in group_names
+ listen: "restart ceph rgws"
+
+ - name: nfss handler
+ include_tasks: handler_nfss.yml
+ when: nfs_group_name in group_names
+ listen: "restart ceph nfss"
+
+ - name: rbdmirrors handler
+ include_tasks: handler_rbdmirrors.yml
+ when: rbdmirror_group_name in group_names
+ listen: "restart ceph rbdmirrors"
+
+ - name: mgrs handler
+ include_tasks: handler_mgrs.yml
+ when: mgr_group_name in group_names
+ listen: "restart ceph mgrs"
+
+ - name: tcmu-runner handler
+ include_tasks: handler_tcmu_runner.yml
+ when: iscsi_gw_group_name in group_names
+ listen: "restart ceph tcmu-runner"
+
+ - name: rbd-target-api and rbd-target-gw handler
+ include_tasks: handler_rbd_target_api_gw.yml
+ when: iscsi_gw_group_name in group_names
+ listen: "restart ceph rbd-target-api-gw"
+
+ - name: ceph crash handler
+ include_tasks: handler_crash.yml
+ listen: "restart ceph crash"
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ or inventory_hostname in groups.get(mgr_group_name, [])
+ or inventory_hostname in groups.get(osd_group_name, [])
+ or inventory_hostname in groups.get(mds_group_name, [])
+ or inventory_hostname in groups.get(rgw_group_name, [])
+ or inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+ - name: remove tempdir for scripts
+ file:
+ path: "{{ tmpdirpath.path }}"
+ state: absent
+ listen:
+ - "restart ceph mons"
+ - "restart ceph osds"
+ - "restart ceph mdss"
+ - "restart ceph rgws"
+ - "restart ceph nfss"
+ - "restart ceph rbdmirrors"
+ - "restart ceph mgrs"
+ register: tmpdirpath
+ when: tmpdirpath.path is defined
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Contains handlers for Ceph services
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: include check_running_containers.yml
+ include_tasks: check_running_containers.yml
+ when: containerized_deployment | bool
+
+- name: include check_socket_non_container.yml
+ include_tasks: check_socket_non_container.yml
+ when: not containerized_deployment | bool
--- /dev/null
+---
+- name: check for a mon container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'"
+ register: ceph_mon_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(mon_group_name, [])
+
+- name: check for an osd container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
+ register: ceph_osd_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(osd_group_name, [])
+
+- name: check for a mds container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'"
+ register: ceph_mds_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(mds_group_name, [])
+
+- name: check for a rgw container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
+ register: ceph_rgw_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: check for a mgr container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'"
+ register: ceph_mgr_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: check for a rbd mirror container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'"
+ register: ceph_rbd_mirror_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: check for a nfs container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
+ register: ceph_nfs_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
+- name: check for a tcmu-runner container
+ command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
+ register: ceph_tcmu_runner_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+- name: check for a rbd-target-api container
+ command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
+ register: ceph_rbd_target_api_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+- name: check for a rbd-target-gw container
+ command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
+ register: ceph_rbd_target_gw_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+- name: check for a ceph-crash container
+ command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
+ register: ceph_crash_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ or inventory_hostname in groups.get(mgr_group_name, [])
+ or inventory_hostname in groups.get(osd_group_name, [])
+ or inventory_hostname in groups.get(mds_group_name, [])
+ or inventory_hostname in groups.get(rgw_group_name, [])
+ or inventory_hostname in groups.get(rbdmirror_group_name, [])
\ No newline at end of file
--- /dev/null
+---
+- name: find ceph mon socket
+ find:
+ paths: ["{{ rbd_client_admin_socket_path }}"]
+ recurse: yes
+ file_type: any
+ patterns: "{{ cluster }}-mon*.asok"
+ use_regex: no
+ register: mon_socket_stat
+ when: inventory_hostname in groups.get(mon_group_name, [])
+
+- name: check if the ceph mon socket is in-use
+ command: grep -q {{ item.path }} /proc/net/unix
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: mon_socket
+ with_items: "{{ mon_socket_stat.files }}"
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ - mon_socket_stat.files | length > 0
+
+- name: remove ceph mon socket if it exists and is not used by a process
+ file:
+ name: "{{ item.0.path }}"
+ state: absent
+ with_together:
+ - "{{ mon_socket_stat.files }}"
+ - "{{ mon_socket.results }}"
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ - mon_socket_stat.files | length > 0
+ - item.1.rc == 1
+
+- name: find ceph osd socket
+ find:
+ paths: ["{{ rbd_client_admin_socket_path }}"]
+ recurse: yes
+ file_type: any
+ patterns: "{{ cluster }}-osd.*.asok"
+ use_regex: no
+ register: osd_socket_stat
+ when: inventory_hostname in groups.get(osd_group_name, [])
+
+- name: check if the ceph osd socket is in-use
+ command: grep -q {{ item.path }} /proc/net/unix
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: osd_socket
+ with_items: "{{ osd_socket_stat.files }}"
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - osd_socket_stat.files | length > 0
+
+- name: remove ceph osd socket if it exists and is not used by a process
+ file:
+ name: "{{ item.0.path }}"
+ state: absent
+ with_together:
+ - "{{ osd_socket_stat.files }}"
+ - "{{ osd_socket.results }}"
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - osd_socket_stat.files | length > 0
+ - item.1.rc == 1
+
+- name: find ceph mds socket
+ find:
+ paths: ["{{ rbd_client_admin_socket_path }}"]
+ recurse: yes
+ file_type: any
+ patterns: "{{ cluster }}-mds*.asok"
+ use_regex: no
+ register: mds_socket_stat
+ when: inventory_hostname in groups.get(mds_group_name, [])
+
+- name: check if the ceph mds socket is in-use
+ command: grep -q {{ item.path }} /proc/net/unix
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: mds_socket
+ with_items: "{{ mds_socket_stat.files }}"
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+ - mds_socket_stat.files | length > 0
+
+- name: remove ceph mds socket if it exists and is not used by a process
+ file:
+ name: "{{ item.0.path }}"
+ state: absent
+ with_together:
+ - "{{ mds_socket_stat.files }}"
+ - "{{ mds_socket.results }}"
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+ - mds_socket_stat.files | length > 0
+ - item.1.rc == 1
+
+- name: find ceph rgw socket
+ find:
+ paths: ["{{ rbd_client_admin_socket_path }}"]
+ recurse: yes
+ file_type: any
+ patterns: "{{ cluster }}-client.rgw*.asok"
+ use_regex: no
+ register: rgw_socket_stat
+ when: inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: check if the ceph rgw socket is in-use
+ command: grep -q {{ item.path }} /proc/net/unix
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: rgw_socket
+ with_items: "{{ rgw_socket_stat.files }}"
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_socket_stat.files | length > 0
+
+- name: remove ceph rgw socket if it exists and is not used by a process
+ file:
+ name: "{{ item.0.path }}"
+ state: absent
+ with_together:
+ - "{{ rgw_socket_stat.files }}"
+ - "{{ rgw_socket.results }}"
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_socket_stat.files | length > 0
+ - item.1.rc == 1
+
+- name: find ceph mgr socket
+ find:
+ paths: ["{{ rbd_client_admin_socket_path }}"]
+ recurse: yes
+ file_type: any
+ patterns: "{{ cluster }}-mgr*.asok"
+ use_regex: no
+ register: mgr_socket_stat
+ when: inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: check if the ceph mgr socket is in-use
+ command: grep -q {{ item.path }} /proc/net/unix
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: mgr_socket
+ with_items: "{{ mgr_socket_stat.files }}"
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+ - mgr_socket_stat.files | length > 0
+
+- name: remove ceph mgr socket if it exists and is not used by a process
+ file:
+ name: "{{ item.0.path }}"
+ state: absent
+ with_together:
+ - "{{ mgr_socket_stat.files }}"
+ - "{{ mgr_socket.results }}"
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+ - mgr_socket_stat.files | length > 0
+ - item.1.rc == 1
+
+- name: find ceph rbd mirror socket
+ find:
+ paths: ["{{ rbd_client_admin_socket_path }}"]
+ recurse: yes
+ file_type: any
+ patterns: "{{ cluster }}-client.rbd-mirror*.asok"
+ use_regex: no
+ register: rbd_mirror_socket_stat
+ when: inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: check if the ceph rbd mirror socket is in-use
+ command: grep -q {{ item.path }} /proc/net/unix
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: rbd_mirror_socket
+ with_items: "{{ rbd_mirror_socket_stat.files }}"
+ when:
+ - inventory_hostname in groups.get(rbdmirror_group_name, [])
+ - rbd_mirror_socket_stat.files | length > 0
+
+- name: remove ceph rbd mirror socket if it exists and is not used by a process
+ file:
+ name: "{{ item.0.path }}"
+ state: absent
+ with_together:
+ - "{{ rbd_mirror_socket_stat.files }}"
+ - "{{ rbd_mirror_socket.results }}"
+ when:
+ - inventory_hostname in groups.get(rbdmirror_group_name, [])
+ - rbd_mirror_socket_stat.files | length > 0
+ - item.1.rc == 1
+
+- name: check for a nfs ganesha pid
+ command: "pgrep ganesha.nfsd"
+ register: nfs_process
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
+- name: check for a tcmu-runner
+ command: "pgrep tcmu-runner"
+ register: ceph_tcmu_runner_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+- name: check for a rbd-target-api
+ command: "pgrep rbd-target-api"
+ register: ceph_rbd_target_api_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+- name: check for a rbd-target-gw
+ command: "pgrep rbd-target-gw"
+ register: ceph_rbd_target_gw_stat
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+- name: check for a ceph-crash process
+ command: pgrep ceph-crash
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: crash_process
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ or inventory_hostname in groups.get(mgr_group_name, [])
+ or inventory_hostname in groups.get(osd_group_name, [])
+ or inventory_hostname in groups.get(mds_group_name, [])
+ or inventory_hostname in groups.get(rgw_group_name, [])
+ or inventory_hostname in groups.get(rbdmirror_group_name, [])
\ No newline at end of file
--- /dev/null
+---
+- name: set _crash_handler_called before restart
+ set_fact:
+ _crash_handler_called: True
+
+- name: restart the ceph-crash service
+ systemd:
+ name: ceph-crash@{{ ansible_facts['hostname'] }}
+ state: restarted
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ ignore_errors: true
+ when: hostvars[inventory_hostname]['_crash_handler_called'] | default(False) | bool
+
+- name: set _crash_handler_called after restart
+ set_fact:
+ _crash_handler_called: False
--- /dev/null
+---
+- name: set _mds_handler_called before restart
+ set_fact:
+ _mds_handler_called: True
+
+- name: copy mds restart script
+ template:
+ src: restart_mds_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_mds_daemon.sh"
+ owner: root
+ group: root
+ mode: 0750
+ when: tmpdirpath.path is defined
+
+- name: restart ceph mds daemon(s)
+ command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mds_daemon.sh
+ when:
+ - hostvars[item]['handler_mds_status'] | default(False) | bool
+ - hostvars[item]['_mds_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[mds_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _mds_handler_called after restart
+ set_fact:
+ _mds_handler_called: False
--- /dev/null
+---
+- name: set _mgr_handler_called before restart
+ set_fact:
+ _mgr_handler_called: True
+
+- name: copy mgr restart script
+ template:
+ src: restart_mgr_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_mgr_daemon.sh"
+ owner: root
+ group: root
+ mode: 0750
+ when: tmpdirpath.path is defined
+
+- name: restart ceph mgr daemon(s)
+ command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mgr_daemon.sh
+ when:
+ - hostvars[item]['handler_mgr_status'] | default(False) | bool
+ - hostvars[item]['_mgr_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[mgr_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _mgr_handler_called after restart
+ set_fact:
+ _mgr_handler_called: False
--- /dev/null
+---
+# We only want to restart on hosts that have called the handler.
+# This var is set when the handler is called, and unset after the
+# restart to ensure only the correct hosts are restarted.
+- name: set _mon_handler_called before restart
+ set_fact:
+ _mon_handler_called: True
+
+- name: copy mon restart script
+ template:
+ src: restart_mon_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_mon_daemon.sh"
+ owner: root
+ group: root
+ mode: 0750
+ when: tmpdirpath.path is defined
+
+- name: restart ceph mon daemon(s)
+ command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mon_daemon.sh
+ when:
+ # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+ - hostvars[item]['handler_mon_status'] | default(False) | bool
+ - hostvars[item]['_mon_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[mon_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _mon_handler_called after restart
+ set_fact:
+ _mon_handler_called: False
--- /dev/null
+---
+- name: set _nfs_handler_called before restart
+ set_fact:
+ _nfs_handler_called: True
+
+- name: copy nfs restart script
+ template:
+ src: restart_nfs_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh"
+ owner: root
+ group: root
+ mode: 0750
+ when: tmpdirpath.path is defined
+
+- name: restart ceph nfs daemon(s)
+ command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh
+ when:
+ - hostvars[item]['handler_nfs_status'] | default(False) | bool
+ - hostvars[item]['_nfs_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[nfs_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _nfs_handler_called after restart
+ set_fact:
+ _nfs_handler_called: False
--- /dev/null
+---
+- name: set_fact trigger_restart
+ set_fact:
+ trigger_restart: true
+ loop: "{{ groups[osd_group_name] }}"
+ when: hostvars[item]['handler_osd_status'] | default(False) | bool
+ run_once: true
+
+- name: osd handler
+ when: trigger_restart | default(False) | bool
+ block:
+ - name: set _osd_handler_called before restart
+ set_fact:
+ _osd_handler_called: True
+
+ - name: unset noup flag
+ ceph_osd_flag:
+ name: noup
+ cluster: "{{ cluster }}"
+ state: absent
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ # This does not just restart OSDs but everything else too. Unfortunately
+ # at this time the ansible role does not have an OSD id list to use
+ # for restarting them specifically.
+ # This does not need to run during a rolling update as the playbook will
+ # restart all OSDs using the tasks "start ceph osd" or
+ # "restart containerized ceph osd"
+ - name: copy osd restart script
+ template:
+ src: restart_osd_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_osd_daemon.sh"
+ owner: root
+ group: root
+ mode: 0750
+ when: tmpdirpath.path is defined
+
+ - name: get pool list
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ register: pool_list
+ delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
+ run_once: true
+ changed_when: false
+ check_mode: false
+
+ - name: get balancer module status
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ register: balancer_status
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ check_mode: false
+
+ - name: set_fact pools_pgautoscaler_mode
+ set_fact:
+ pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
+ run_once: true
+ with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
+
+ - name: disable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: (balancer_status.stdout | from_json)['active'] | bool
+
+ - name: disable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: false
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
+ run_once: true
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: restart ceph osds daemon(s)
+ command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_osd_daemon.sh
+ when:
+ - hostvars[item]['handler_osd_status'] | default(False) | bool
+ - handler_health_osd_check | bool
+ - hostvars[item]['_osd_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[osd_group_name] | intersect(ansible_play_batch) }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+ - name: set _osd_handler_called after restart
+ set_fact:
+ _osd_handler_called: False
+
+ - name: re-enable pg autoscale on pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_autoscale_mode: true
+ with_items: "{{ pools_pgautoscaler_mode }}"
+ run_once: true
+ delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
+ when:
+ - pools_pgautoscaler_mode is defined
+ - item.mode == 'on'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: re-enable balancer
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: (balancer_status.stdout | from_json)['active'] | bool
--- /dev/null
+---
+- name: set _rbd_target_api_handler_called before restart
+ set_fact:
+ _rbd_target_api_handler_called: True
+
+- name: restart rbd-target-api
+ service:
+ name: rbd-target-api
+ state: restarted
+ when:
+ - ceph_rbd_target_api_stat.get('rc') == 0
+ - hostvars[item]['_rbd_target_api_handler_called'] | default(False) | bool
+ - ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
+ with_items: "{{ groups[iscsi_gw_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _rbd_target_api_handler_called after restart
+ set_fact:
+ _rbd_target_api_handler_called: False
+
+- name: set _rbd_target_gw_handler_called before restart
+ set_fact:
+ _rbd_target_gw_handler_called: True
+
+- name: restart rbd-target-gw
+ service:
+ name: rbd-target-gw
+ state: restarted
+ when:
+ - ceph_rbd_target_gw_stat.get('rc') == 0
+ - hostvars[item]['_rbd_target_gw_handler_called'] | default(False) | bool
+ - ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
+ with_items: "{{ groups[iscsi_gw_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _rbd_target_gw_handler_called after restart
+ set_fact:
+ _rbd_target_gw_handler_called: False
--- /dev/null
+---
+- name: set _rbdmirror_handler_called before restart
+ set_fact:
+ _rbdmirror_handler_called: True
+
+- name: copy rbd mirror restart script
+ template:
+ src: restart_rbd_mirror_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_rbd_mirror_daemon.sh"
+ owner: root
+ group: root
+ mode: 0750
+ when: tmpdirpath.path is defined
+
+- name: restart ceph rbd mirror daemon(s)
+ command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rbd_mirror_daemon.sh
+ when:
+ - hostvars[item]['handler_rbd_mirror_status'] | default(False) | bool
+ - hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[rbdmirror_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _rbdmirror_handler_called after restart
+ set_fact:
+ _rbdmirror_handler_called: False
--- /dev/null
+---
+- name: set _rgw_handler_called before restart
+ set_fact:
+ _rgw_handler_called: True
+
+- name: copy rgw restart script
+ template:
+ src: restart_rgw_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_rgw_daemon.sh"
+ owner: root
+ group: root
+ mode: 0750
+ when: tmpdirpath.path is defined
+
+- name: restart ceph rgw daemon(s)
+ command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rgw_daemon.sh
+ when:
+ - hostvars[item]['handler_rgw_status'] | default(False) | bool
+ - hostvars[item]['_rgw_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[rgw_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _rgw_handler_called after restart
+ set_fact:
+ _rgw_handler_called: False
--- /dev/null
+---
+- name: set _tcmu_runner_handler_called before restart
+ set_fact:
+ _tcmu_runner_handler_called: True
+
+- name: restart tcmu-runner
+ service:
+ name: tcmu-runner
+ state: restarted
+ when:
+ - ceph_tcmu_runner_stat.get('rc') == 0
+ - hostvars[item]['_tcmu_runner_handler_called'] | default(False) | bool
+ - ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
+ with_items: "{{ groups[iscsi_gw_group_name] }}"
+ delegate_to: "{{ item }}"
+ run_once: True
+
+- name: set _tcmu_runner_handler_called after restart
+ set_fact:
+ _tcmu_runner_handler_called: False
--- /dev/null
+---
+- name: include check_running_cluster.yml
+ include_tasks: check_running_cluster.yml
+
+# We do not want to run these checks on initial deployment (`socket.rc == 0`)
+- name: set_fact handler_mon_status
+ set_fact:
+ handler_mon_status: "{{ 0 in (mon_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mon_container_stat.get('rc') == 0 and ceph_mon_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(mon_group_name, [])
+
+- name: set_fact handler_osd_status
+ set_fact:
+ handler_osd_status: "{{ 0 in (osd_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_osd_container_stat.get('rc') == 0 and ceph_osd_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(osd_group_name, [])
+
+- name: set_fact handler_mds_status
+ set_fact:
+ handler_mds_status: "{{ 0 in (mds_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mds_container_stat.get('rc') == 0 and ceph_mds_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(mds_group_name, [])
+
+- name: set_fact handler_rgw_status
+ set_fact:
+ handler_rgw_status: "{{ 0 in (rgw_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rgw_container_stat.get('rc') == 0 and ceph_rgw_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: set_fact handler_nfs_status
+ set_fact:
+ handler_nfs_status: "{{ (nfs_process.get('rc') == 0) if not containerized_deployment | bool else (ceph_nfs_container_stat.get('rc') == 0 and ceph_nfs_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
+- name: set_fact handler_rbd_status
+ set_fact:
+ handler_rbd_mirror_status: "{{ 0 in (rbd_mirror_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rbd_mirror_container_stat.get('rc') == 0 and ceph_rbd_mirror_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: set_fact handler_mgr_status
+ set_fact:
+ handler_mgr_status: "{{ 0 in (mgr_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mgr_container_stat.get('rc') == 0 and ceph_mgr_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: set_fact handler_crash_status
+ set_fact:
+ handler_crash_status: "{{ crash_process.get('rc') == 0 if not containerized_deployment | bool else (ceph_crash_container_stat.get('rc') == 0 and ceph_crash_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ or inventory_hostname in groups.get(mgr_group_name, [])
+ or inventory_hostname in groups.get(osd_group_name, [])
+ or inventory_hostname in groups.get(mds_group_name, [])
+ or inventory_hostname in groups.get(rgw_group_name, [])
+ or inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: rgw multi-instances related tasks
+ when:
+ - not docker2podman | default(false) | bool
+ - not rolling_update | default(false) | bool
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - handler_rgw_status | bool
+ block:
+ - name: import_role ceph-config
+ import_role:
+ name: ceph-config
+
+ - name: import_role ceph-rgw
+ import_role:
+ name: ceph-rgw
+ tasks_from: pre_requisite.yml
+ when: not containerized_deployment | bool
+
+ - name: import_role ceph-rgw
+ import_role:
+ name: ceph-rgw
+ tasks_from: multisite.yml
+ when:
+ - rgw_multisite | bool
+ - not multisite_called_from_handler_role | default(False) | bool
+
+ - name: set_fact multisite_called_from_handler_role
+ set_fact:
+ multisite_called_from_handler_role: true
--- /dev/null
+#!/bin/bash
+
+RETRIES="{{ handler_health_mds_check_retries }}"
+DELAY="{{ handler_health_mds_check_delay }}"
+MDS_NAME="{{ ansible_facts['hostname'] }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }}"
+{% endif %}
+
+# Backward compatibility
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok
+
+# First, restart the daemon
+systemctl restart ceph-mds@${MDS_NAME}
+
+# Wait and ensure the socket exists after restarting the daemon
+while [ $RETRIES -ne 0 ]; do
+ $DOCKER_EXEC test -S $SOCKET && exit 0
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+done
+# If we reach this point, it means the socket is not present.
+echo "Socket file ${SOCKET} could not be found, which means the Metadata Server is not running. Showing ceph-mds unit logs now:"
+journalctl -u ceph-mds@${MDS_NAME}
+exit 1
--- /dev/null
+#!/bin/bash
+
+RETRIES="{{ handler_health_mgr_check_retries }}"
+DELAY="{{ handler_health_mgr_check_delay }}"
+MGR_NAME="{{ ansible_facts['hostname'] }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_facts['hostname'] }}"
+{% endif %}
+
+# Backward compatibility
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok
+
+systemctl reset-failed ceph-mgr@${MGR_NAME}
+# First, restart the daemon
+systemctl restart ceph-mgr@${MGR_NAME}
+
+# Wait and ensure the socket exists after restarting the daemon
+while [ $RETRIES -ne 0 ]; do
+ $DOCKER_EXEC test -S $SOCKET && exit 0
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+done
+# If we reach this point, it means the socket is not present.
+echo "Socket file ${SOCKET} could not be found, which means ceph manager is not running. Showing ceph-mgr unit logs now:"
+journalctl -u ceph-mgr@${MGR_NAME}
+exit 1
--- /dev/null
+#!/bin/bash
+
+RETRIES="{{ handler_health_mon_check_retries }}"
+DELAY="{{ handler_health_mon_check_delay }}"
+MONITOR_NAME="{{ monitor_name }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+{% endif %}
+
+# if daemon is uninstalled, no restarting is needed; so exit with success
+systemctl status ceph-mon@{{ ansible_facts['hostname'] }} > /dev/null
+if [[ $? -ne 0 ]]; then
+ exit 0
+fi
+
+# Backward compatibility
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok
+
+check_quorum() {
+while [ $RETRIES -ne 0 ]; do
+ $DOCKER_EXEC ceph --cluster {{ cluster }} quorum_status --format json | "{{ discovered_interpreter_python }}" -c 'import sys, json; exit(0) if "{{ monitor_name }}" in json.load(sys.stdin)["quorum_names"] else exit(1)' && exit 0
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+done
+# If we reach this point, it means there is a problem with the quorum
+echo "Error with quorum."
+echo "cluster status:"
+$DOCKER_EXEC ceph --cluster {{ cluster }} -s
+echo "quorum status:"
+$DOCKER_EXEC ceph --cluster {{ cluster }} daemon mon.${MONITOR_NAME} mon_status
+$DOCKER_EXEC ceph --cluster {{ cluster }} daemon mon.${MONITOR_NAME} quorum_status
+exit 1
+}
+
+# First, restart the daemon
+systemctl restart ceph-mon@{{ ansible_facts['hostname'] }}
+
+COUNT=10
+# Wait and ensure the socket exists after restarting the daemon
+while [ $COUNT -ne 0 ]; do
+ $DOCKER_EXEC test -S $SOCKET && check_quorum
+ sleep $DELAY
+ let COUNT=COUNT-1
+done
+# If we reach this point, it means the socket is not present.
+echo "Socket file ${SOCKET} could not be found, which means the monitor is not running. Showing ceph-mon unit logs now:"
+journalctl -u ceph-mon@{{ ansible_facts['hostname'] }}
+exit 1
--- /dev/null
+#!/bin/bash
+
+RETRIES="{{ handler_health_nfs_check_retries }}"
+DELAY="{{ handler_health_nfs_check_delay }}"
+NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
+PID=/var/run/ganesha.pid
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
+{% endif %}
+
+# First, restart the daemon
+{% if containerized_deployment | bool -%}
+systemctl restart $NFS_NAME
+# Wait and ensure the pid exists after restarting the daemon
+while [ $RETRIES -ne 0 ]; do
+ $DOCKER_EXEC test -f $PID && exit 0
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+done
+# If we reach this point, it means the pid is not present.
+echo "PID file ${PID} could not be found, which means Ganesha is not running. Showing $NFS_NAME unit logs now:"
+journalctl -u $NFS_NAME
+exit 1
+{% else %}
+systemctl restart nfs-ganesha
+{% endif %}
--- /dev/null
+#!/bin/bash
+
+DELAY="{{ handler_health_osd_check_delay }}"
+CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
+
+check_pgs() {
+ num_pgs=$($container_exec ceph $CEPH_CLI -s -f json | "{{ discovered_interpreter_python }}" -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
+ if [[ "$num_pgs" == "0" ]]; then
+ return 0
+ fi
+ while [ $RETRIES -ne 0 ]; do
+ test "$($container_exec ceph $CEPH_CLI -s -f json | "{{ discovered_interpreter_python }}" -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($container_exec ceph $CEPH_CLI -s -f json | "{{ discovered_interpreter_python }}" -c 'import sys, json; print(sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]]))')"
+ RET=$?
+ test $RET -eq 0 && return 0
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+ done
+ # PGs not clean, exiting with return code 1
+ echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean"
+ echo "It is possible that the cluster has fewer OSDs than the replica configuration requires"
+ echo "Will refuse to continue"
+ $container_exec ceph $CEPH_CLI -s
+ $container_exec ceph $CEPH_CLI osd dump
+ $container_exec ceph $CEPH_CLI osd tree
+ $container_exec ceph $CEPH_CLI osd crush rule dump
+ exit 1
+}
+
+wait_for_socket_in_container() {
+ osd_mount_point=$({{ container_binary }} exec "$1" df --output=target | grep '/var/lib/ceph/osd/')
+ whoami=$({{ container_binary }} exec "$1" cat $osd_mount_point/whoami)
+ if ! {{ container_binary }} exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/{{ cluster }}-osd.${whoami}.asok ]; do sleep 1 ; done"; then
+ echo "Timed out while trying to look for a Ceph OSD socket."
+ echo "Abort mission!"
+ exit 1
+ fi
+}
+
+get_dev_name() {
+ echo $1 | sed -r 's/ceph-osd@([a-z]{1,4})\.service/\1/'
+}
+
+get_container_id_from_dev_name() {
+ local id
+ local count
+ count=10
+ while [ $count -ne 0 ]; do
+ id=$({{ container_binary }} ps | grep -E "${1}$" | cut -d' ' -f1)
+ test "$id" != "" && break
+ sleep $DELAY
+ let count=count-1
+ done
+ echo "$id"
+}
+
+# For both containerized and non-containerized deployments, the unit file
+# looks like: ceph-osd@NNN.service where NNN is the OSD ID
+for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]+).service"); do
+ # First, restart daemon(s)
+ systemctl restart "${unit}"
+ # We need to wait because it may take some time for the socket to actually exist
+ COUNT=10
+ # Wait and ensure the socket exists after restarting the daemon
+ {% if containerized_deployment | bool %}
+ osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
+ container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}")
+ container_exec="{{ container_binary }} exec $container_id"
+ {% else %}
+ osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
+ {% endif %}
+ SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok
+ while [ $COUNT -ne 0 ]; do
+ RETRIES="{{ handler_health_osd_check_retries }}"
+ $container_exec test -S "$SOCKET" && check_pgs && continue 2
+ sleep $DELAY
+ let COUNT=COUNT-1
+ done
+ # If we reach this point, it means the socket is not present.
+ echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running. Showing ceph-osd unit logs now:"
+ journalctl -u "${unit}"
+ exit 1
+done
--- /dev/null
+#!/bin/bash
+
+RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
+DELAY="{{ handler_health_rbd_mirror_check_delay }}"
+RBD_MIRROR_NAME="{{ ansible_facts['hostname'] }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
+{% endif %}
+
+# Backward compatibility
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok
+
+# First, restart the daemon
+systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
+
+# Wait and ensure the socket exists after restarting the daemon
+while [ $RETRIES -ne 0 ]; do
+ $DOCKER_EXEC test -S $SOCKET && exit 0
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+done
+# If we reach this point, it means the socket is not present.
+echo "Socket file ${SOCKET} could not be found, which means rbd mirror is not running. Showing ceph-rbd-mirror unit logs now:"
+journalctl -u ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
+exit 1
--- /dev/null
+#!/bin/bash
+
+RETRIES="{{ handler_health_rgw_check_retries }}"
+DELAY="{{ handler_health_rgw_check_delay }}"
+HOST_NAME="{{ ansible_facts['hostname'] }}"
+RGW_NUMS={{ rgw_instances | length | int }}
+RGW_FRONTEND_SSL_CERT={{ radosgw_frontend_ssl_certificate }}
+if [ -n "$RGW_FRONTEND_SSL_CERT" ]; then
+ RGW_PROTOCOL=https
+else
+ RGW_PROTOCOL=http
+fi
+INSTANCES_NAME=({% for i in rgw_instances %}{{ i.instance_name }} {% endfor %})
+RGW_IPS=({% for i in rgw_instances %}{{ i.radosgw_address }} {% endfor %})
+RGW_PORTS=({% for i in rgw_instances %}{{ i.radosgw_frontend_port }} {% endfor %})
+declare -a DOCKER_EXECS
+declare -a SOCKET_PREFIX
+for ((i=0; i<${RGW_NUMS}; i++)); do
+ SOCKET_PREFIX[i]="/var/run/ceph/{{ cluster }}-client.rgw.${HOST_NAME}.${INSTANCES_NAME[i]}"
+ DOCKER_EXECS[i]=""
+{% if containerized_deployment | bool %}
+ DOCKER_EXECS[i]="{{ container_binary }} exec ceph-rgw-${HOST_NAME}-${INSTANCES_NAME[i]}"
+{% endif %}
+done
+
+check_socket() {
+ local i=$1
+ local succ=0
+ local count=10
+ # Wait and ensure the socket exists after restarting the daemon
+ while [ $count -ne 0 ]; do
+ SOCKET=$(grep ${SOCKET_PREFIX[i]} /proc/net/unix | awk '{ print $8 }')
+ if [ -n "${SOCKET}" ]; then
+ ${DOCKER_EXECS[i]} test -S ${SOCKET} && succ=$((succ+1)) && break
+ fi
+ sleep $DELAY
+ let count=count-1
+ done
+ if [ $succ -ne 1 ]; then
+ echo "Socket file ${SOCKET} could not be found, which means Rados Gateway is not running. Showing ceph-rgw unit logs now:"
+ journalctl -u ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
+ exit 1
+ fi
+}
+
+check_for_curl_or_wget() {
+ local i=$1
+ if ${DOCKER_EXECS[i]} command -v wget &>/dev/null; then
+ rgw_test_command="wget --no-check-certificate --tries 1 --quiet -O /dev/null"
+ elif ${DOCKER_EXECS[i]} command -v curl &>/dev/null; then
+ rgw_test_command="curl {{ '-g' if ip_version == 'ipv6' else '' }} -k --fail --silent --output /dev/null"
+ else
+ echo "It seems that neither curl nor wget are available on your system."
+ echo "Cannot test rgw connection."
+ exit 0
+ fi
+}
+
+check_rest() {
+ local i=$1
+ check_for_curl_or_wget ${i}
+ local succ=0
+ while [ $RETRIES -ne 0 ]; do
+ ${DOCKER_EXECS[i]} $rgw_test_command $RGW_PROTOCOL://${RGW_IPS[i]}:${RGW_PORTS[i]} && succ=$((succ+1)) && break
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+ done
+ if [ $succ -ne 1 ]; then
+ # If we reach this point, it means there is a problem with the connection to rgw
+ echo "Error connecting locally to Rados Gateway service: $RGW_PROTOCOL://${RGW_IPS[i]}:${RGW_PORTS[i]}"
+ exit 1
+ fi
+}
+
+for ((i=0; i<${RGW_NUMS}; i++)); do
+ # First, restart the daemon
+ systemctl restart ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
+ # Check socket files
+ check_socket ${i}
+ # Check rest
+ check_rest ${i}
+done
--- /dev/null
+---
+- name: disable ntpd
+ failed_when: false
+ service:
+ name: '{{ ntp_service_name }}'
+ state: stopped
+ enabled: no
+
+- name: disable chronyd
+ failed_when: false
+ service:
+ name: '{{ chrony_daemon_name }}'
+ enabled: no
+ state: stopped
+
+- name: disable timesyncd
+ failed_when: false
+ service:
+ name: timesyncd
+ enabled: no
+ state: stopped
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Guillaume Abrioux
+ description: Handles ceph infra requirements (ntp, firewall, ...)
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: check firewalld installation on redhat or SUSE/openSUSE
+ command: rpm -q firewalld
+ args:
+ warn: no
+ register: firewalld_pkg_query
+ ignore_errors: true
+ check_mode: no
+ changed_when: false
+ tags: firewall
+
+- when: (firewalld_pkg_query.get('rc', 1) == 0
+ or is_atomic | bool)
+ tags: firewall
+ block:
+ - name: install firewalld python binding
+ package:
+ name: "python{{ ansible_facts['python']['version']['major'] }}-firewall"
+ tags: with_pkg
+ when: not is_atomic | bool
+
+ - name: start firewalld
+ service:
+ name: firewalld
+ state: started
+ enabled: yes
+ register: result
+ retries: 5
+ delay: 3
+ until: result is succeeded
+
+ - name: open ceph networks on monitor
+ firewalld:
+ zone: "{{ ceph_mon_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - mon_group_name is defined
+ - mon_group_name in group_names
+
+ - name: open ceph networks on manager when collocated
+ firewalld:
+ zone: "{{ ceph_mgr_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - mon_group_name is defined
+ - mon_group_name in group_names
+ - groups.get(mgr_group_name, []) | length == 0
+
+ - name: open monitor and manager ports
+ firewalld:
+ service: "{{ item.service }}"
+ zone: "{{ item.zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items:
+ - { 'service': 'ceph-mon', 'zone': "{{ ceph_mon_firewall_zone }}" }
+ - { 'service': 'ceph', 'zone': "{{ ceph_mgr_firewall_zone }}" }
+ when:
+ - mon_group_name is defined
+ - mon_group_name in group_names
+
+ - name: open ceph networks on manager when dedicated
+ firewalld:
+ zone: "{{ ceph_mgr_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - mgr_group_name is defined
+ - mgr_group_name in group_names
+ - groups.get(mgr_group_name, []) | length > 0
+
+ - name: open manager ports
+ firewalld:
+ service: ceph
+ zone: "{{ ceph_mgr_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - mgr_group_name is defined
+ - mgr_group_name in group_names
+
+ - name: open ceph networks on osd
+ firewalld:
+ zone: "{{ ceph_osd_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') | union(cluster_network.split(',')) }}"
+ when:
+ - osd_group_name is defined
+ - osd_group_name in group_names
+
+ - name: open osd ports
+ firewalld:
+ service: ceph
+ zone: "{{ ceph_osd_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - osd_group_name is defined
+ - osd_group_name in group_names
+
+ - name: open ceph networks on rgw
+ firewalld:
+ zone: "{{ ceph_rgw_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - rgw_group_name is defined
+ - rgw_group_name in group_names
+
+ - name: open rgw ports
+ firewalld:
+ port: "{{ item.radosgw_frontend_port }}/tcp"
+ zone: "{{ ceph_rgw_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ loop: "{{ rgw_instances }}"
+ when:
+ - rgw_group_name is defined
+ - rgw_group_name in group_names
+
+ - name: open ceph networks on mds
+ firewalld:
+ zone: "{{ ceph_mds_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - mds_group_name is defined
+ - mds_group_name in group_names
+
+ - name: open mds ports
+ firewalld:
+ service: ceph
+ zone: "{{ ceph_mds_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - mds_group_name is defined
+ - mds_group_name in group_names
+
+ - name: open ceph networks on nfs
+ firewalld:
+ zone: "{{ ceph_nfs_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - nfs_group_name is defined
+ - nfs_group_name in group_names
+
+ - name: open nfs ports
+ firewalld:
+ service: nfs
+ zone: "{{ ceph_nfs_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - nfs_group_name is defined
+ - nfs_group_name in group_names
+
+ - name: open nfs ports (portmapper)
+ firewalld:
+ port: "111/tcp"
+ zone: "{{ ceph_nfs_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - nfs_group_name is defined
+ - nfs_group_name in group_names
+
+ - name: open ceph networks on rbdmirror
+ firewalld:
+ zone: "{{ ceph_rbdmirror_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - rbdmirror_group_name is defined
+ - rbdmirror_group_name in group_names
+
+ - name: open rbdmirror ports
+ firewalld:
+ service: ceph
+ zone: "{{ ceph_rbdmirror_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - rbdmirror_group_name is defined
+ - rbdmirror_group_name in group_names
+
+ - name: open ceph networks on iscsi
+ firewalld:
+ zone: "{{ ceph_iscsi_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - iscsi_gw_group_name is defined
+ - iscsi_gw_group_name in group_names
+
+ - name: open iscsi target ports
+ firewalld:
+ port: "3260/tcp"
+ zone: "{{ ceph_iscsi_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - iscsi_gw_group_name is defined
+ - iscsi_gw_group_name in group_names
+
+ - name: open iscsi api ports
+ firewalld:
+ port: "{{ api_port | default(5000) }}/tcp"
+ zone: "{{ ceph_iscsi_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - iscsi_gw_group_name is defined
+ - iscsi_gw_group_name in group_names
+
+ - name: open iscsi/prometheus port
+ firewalld:
+ port: "9287/tcp"
+ zone: "{{ ceph_iscsi_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - iscsi_gw_group_name is defined
+ - iscsi_gw_group_name in group_names
+
+ - name: open dashboard ports
+ include_tasks: dashboard_firewall.yml
+ when: dashboard_enabled | bool
+
+ - name: open ceph networks on haproxy
+ firewalld:
+ zone: "{{ ceph_rgwloadbalancer_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - rgwloadbalancer_group_name is defined
+ - rgwloadbalancer_group_name in group_names
+
+ - name: open haproxy ports
+ firewalld:
+ port: "{{ haproxy_frontend_port | default(80) }}/tcp"
+ zone: "{{ ceph_rgwloadbalancer_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - rgwloadbalancer_group_name is defined
+ - rgwloadbalancer_group_name in group_names
+
+ - name: add rich rule for keepalived vrrp
+ firewalld:
+ rich_rule: 'rule protocol value="vrrp" accept'
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - rgwloadbalancer_group_name is defined
+ - rgwloadbalancer_group_name in group_names
--- /dev/null
+---
+- name: open node_exporter port
+ firewalld:
+ port: "{{ node_exporter_port }}/tcp"
+ zone: "{{ ceph_dashboard_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+
+- block:
+ - name: open dashboard port
+ firewalld:
+ port: "{{ dashboard_port }}/tcp"
+ zone: "{{ ceph_dashboard_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+
+ - name: open mgr/prometheus port
+ firewalld:
+ port: "9283/tcp"
+ zone: "{{ ceph_dashboard_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - mgr_group_name is defined
+ - (groups.get(mgr_group_name,[]) | length > 0 and mgr_group_name in group_names) or
+ (groups.get(mgr_group_name,[]) | length == 0 and mon_group_name in group_names)
+
+- block:
+ - name: open grafana port
+ firewalld:
+ port: "{{ grafana_port }}/tcp"
+ zone: "{{ ceph_dashboard_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+
+ - name: open prometheus port
+ firewalld:
+ port: "{{ prometheus_port }}/tcp"
+ zone: "{{ ceph_dashboard_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+
+ - name: open alertmanager port
+ firewalld:
+ port: "{{ alertmanager_port }}/tcp"
+ zone: "{{ ceph_dashboard_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+
+ - name: open alertmanager cluster port
+ firewalld:
+ port: "{{ alertmanager_cluster_port }}/{{ item }}"
+ zone: "{{ ceph_dashboard_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items:
+ - "tcp"
+ - "udp"
+ when:
+ - monitoring_group_name is defined
+ - monitoring_group_name in group_names
--- /dev/null
+---
+- name: update cache for Debian based OSs
+ apt:
+ update_cache: yes
+ when: ansible_facts['os_family'] == "Debian"
+ register: result
+ until: result is succeeded
+ tags: package-install
+
+- name: include_tasks configure_firewall.yml
+ include_tasks: configure_firewall.yml
+ when:
+ - configure_firewall | bool
+ - ansible_facts['os_family'] in ['RedHat', 'Suse']
+ tags: configure_firewall
+
+- name: include_tasks setup_ntp.yml
+ include_tasks: setup_ntp.yml
+ when: ntp_service_enabled | bool
+ tags: configure_ntp
+
+- name: ensure logrotate is installed
+ package:
+ name: logrotate
+ state: present
+ register: result
+ until: result is succeeded
+ tags: with_pkg
+ when:
+ - not is_atomic | bool
+ - containerized_deployment | bool
+ - inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(iscsi_gw_group_name, [])
+
+- name: add logrotate configuration
+ template:
+ src: logrotate.conf.j2
+ dest: /etc/logrotate.d/ceph
+ mode: "0644"
+ owner: root
+ group: root
+ when:
+ - containerized_deployment | bool
+ - inventory_hostname in groups.get(mon_group_name, []) or
+ inventory_hostname in groups.get(osd_group_name, []) or
+ inventory_hostname in groups.get(mds_group_name, []) or
+ inventory_hostname in groups.get(rgw_group_name, []) or
+ inventory_hostname in groups.get(mgr_group_name, []) or
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(iscsi_gw_group_name, [])
\ No newline at end of file
--- /dev/null
+---
+- name: set ntp service and chrony daemon name for Debian family
+ set_fact:
+ chrony_daemon_name: chrony
+ ntp_service_name: ntp
+ when: ansible_facts['os_family'] == 'Debian'
+
+- name: set ntp service and chrony daemon name for RedHat and Suse family
+ set_fact:
+ chrony_daemon_name: chronyd
+ ntp_service_name: ntpd
+ when: ansible_facts['os_family'] in ['RedHat', 'Suse']
+
+# Installation of NTP daemons needs to be a separate task since installations
+# can't happen on Atomic
+- name: install the ntp daemon
+ when: not is_atomic | bool
+ block:
+ - name: install ntpd
+ package:
+ name: ntp
+ state: present
+ register: result
+ until: result is succeeded
+ when: ntp_daemon_type == "ntpd"
+
+ - name: install chrony
+ package:
+ name: chrony
+ state: present
+ register: result
+ until: result is succeeded
+ when: ntp_daemon_type == "chronyd"
+
+- name: enable the ntp daemon and disable the rest
+ block:
+ - name: enable timesyncing on timesyncd
+ command: timedatectl set-ntp on
+ notify:
+ - disable ntpd
+ - disable chronyd
+ when: ntp_daemon_type == "timesyncd"
+
+ - name: disable time sync using timesyncd if we are not using it
+ command: timedatectl set-ntp no
+ when: ntp_daemon_type != "timesyncd"
+
+ - name: enable ntpd
+ service:
+ name: "{{ ntp_service_name }}"
+ enabled: yes
+ state: started
+ notify:
+ - disable chronyd
+ - disable timesyncd
+ when: ntp_daemon_type == "ntpd"
+
+ - name: enable chronyd
+ service:
+ name: "{{ chrony_daemon_name }}"
+ enabled: yes
+ state: started
+ notify:
+ - disable ntpd
+ - disable timesyncd
+ when: ntp_daemon_type == "chronyd"
--- /dev/null
+/var/log/ceph/*.log {
+ rotate 7
+ daily
+ compress
+ sharedscripts
+ postrotate
+ killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || pkill -1 -x "ceph-mon|ceph-mgr|ceph-mds|ceph-osd|ceph-fuse|radosgw|rbd-mirror" || true
+ endscript
+ missingok
+ notifempty
+ su root root
+}
+
+/var/log/tcmu-runner/*.log {
+ rotate 7
+ daily
+ compress
+ sharedscripts
+ postrotate
+ killall -q -1 tcmu-runner || pkill -1 -x "tcmu-runner" || true
+ endscript
+ missingok
+ notifempty
+ su root root
+}
--- /dev/null
+Copyright 2016 Paul Cuzner pcuzner at redhat dot com
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
--- /dev/null
+# Ansible role: ceph-iscsi
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+# Whether or not to generate a secure certificate for the iSCSI gateway nodes
+generate_crt: False
+
+iscsi_conf_overrides: {}
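+# A hypothetical override example targeting the '[config]' section rendered by
+# this role's iscsi-gateway.cfg template (values shown are illustrative only):
+# iscsi_conf_overrides:
+#   config:
+#     api_secure: true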
+iscsi_pool_name: rbd
+#iscsi_pool_size: 3
+
+copy_admin_key: True
+
+##################
+# RBD-TARGET-API #
+##################
+# Optional settings related to the CLI/API service
+api_user: admin
+api_password: admin
+api_port: 5000
+api_secure: false
+loop_delay: 1
+# set the variable below to a comma-separated list of IPs
+# in order to restrict access to the iSCSI API
+# trusted_ip_list: 192.168.122.1
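+# e.g. a hypothetical list allowing two hosts (addresses are illustrative only):
+# trusted_ip_list: 192.168.122.1,192.168.122.2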
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
+
+# TCMU_RUNNER resource limitation
+ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_tcmu_runner_docker_cpu_limit: 1
+
+# RBD_TARGET_GW resource limitation
+ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_rbd_target_gw_docker_cpu_limit: 1
+
+# RBD_TARGET_API resource limitation
+ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_rbd_target_api_docker_cpu_limit: 1
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Paul Cuzner
+ description: Installs Ceph iSCSI Gateways
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: get keys from monitors
+ ceph_key:
+ name: client.admin
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _admin_key
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ when:
+ - cephx | bool
+ - copy_admin_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: copy ceph key(s) if needed
+ copy:
+ dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
+ content: "{{ _admin_key.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ when:
+ - cephx | bool
+ - copy_admin_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: add mgr ip address to trusted list with dashboard - ipv4
+ set_fact:
+ trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
+ with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
+ when:
+ - dashboard_enabled | bool
+ - ip_version == 'ipv4'
+
+- name: add mgr ip address to trusted list with dashboard - ipv6
+ set_fact:
+ trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
+ with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
+ when:
+ - dashboard_enabled | bool
+ - ip_version == 'ipv6'
+
+- name: deploy gateway settings, used by the ceph_iscsi_config modules
+ config_template:
+ src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2"
+ dest: /etc/ceph/iscsi-gateway.cfg
+ config_type: ini
+ config_overrides: '{{ iscsi_conf_overrides }}'
+ mode: "0600"
+ notify: restart ceph rbd-target-api-gw
+
+- name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: containerized_deployment | bool
+
+- name: create iscsi pool
+ ceph_pool:
+ name: "{{ iscsi_pool_name }}"
+ cluster: "{{ cluster }}"
+ size: "{{ iscsi_pool_size | default(omit) }}"
+ application: "rbd"
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
--- /dev/null
+---
+- name: create rbd target log directories
+ file:
+ path: '/var/log/{{ item }}'
+ state: directory
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
+ - tcmu-runner
+
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
+ systemd:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ with_items:
+ - tcmu-runner
+ - rbd-target-gw
+ - rbd-target-api
--- /dev/null
+---
+- name: create a temporary directory
+ tempfile:
+ state: directory
+ register: iscsi_ssl_tmp_dir
+ delegate_to: localhost
+ run_once: true
+
+- name: set_fact crt_files
+ set_fact:
+ crt_files:
+ - "iscsi-gateway.crt"
+ - "iscsi-gateway.key"
+ - "iscsi-gateway.pem"
+ - "iscsi-gateway-pub.key"
+
+- name: check for existing crt file(s) in monitor key/value store
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
+ with_items: "{{ crt_files }}"
+ changed_when: false
+ failed_when: false
+ run_once: true
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ register: crt_files_exist
+
+- name: set_fact crt_files_missing
+ set_fact:
+ crt_files_missing: "{{ crt_files_exist.results | selectattr('rc', 'equalto', 0) | map(attribute='rc') | list | length != crt_files | length }}"
+
+- name: generate ssl crt/key files
+ block:
+ - name: create ssl crt/key files
+ command: >
+ openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key
+ -x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
+ -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}"
+ delegate_to: localhost
+ run_once: True
+ with_items: "{{ crt_files_exist.results }}"
+
+ - name: create pem
+ shell: >
+ cat {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
+ {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem
+ delegate_to: localhost
+ run_once: True
+ register: pem
+ with_items: "{{ crt_files_exist.results }}"
+
+ - name: create public key from pem
+ shell: >
+ openssl x509 -inform pem -in {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem
+ -pubkey -noout > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway-pub.key
+ delegate_to: localhost
+ run_once: True
+ when: pem.changed
+ tags: skip_ansible_lint
+
+ - name: slurp ssl crt/key files
+ slurp:
+ src: "{{ iscsi_ssl_tmp_dir.path }}/{{ item }}"
+ register: iscsi_ssl_files_content
+ with_items: "{{ crt_files }}"
+ run_once: true
+ delegate_to: localhost
+
+ - name: store ssl crt/key files
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
+ run_once: true
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ with_items: "{{ iscsi_ssl_files_content.results }}"
+ when: crt_files_missing
+
+- name: copy crt file(s) to gateway nodes
+ copy:
+ content: "{{ item.stdout | b64decode }}"
+ dest: "/etc/ceph/{{ item.item }}"
+ owner: root
+ group: root
+ mode: 0400
+ changed_when: false
+ with_items: "{{ crt_files_exist.results if not crt_files_missing else iscsi_ssl_files_content.results }}"
+ when: not crt_files_missing
+
+- name: clean temporary directory
+ file:
+ path: "{{ iscsi_ssl_tmp_dir.path }}"
+ state: absent
\ No newline at end of file
--- /dev/null
+---
+- name: include common.yml
+ include_tasks: common.yml
+
+- name: include non-container/prerequisites.yml
+ include_tasks: non-container/prerequisites.yml
+ when: not containerized_deployment | bool
+
+# deploy_ssl_keys uses the ansible controller to create self-signed crt/key/pub files
+# and transfers them to the /etc/ceph directory on each gateway node. SSL certs are
+# used by the API for https support.
+- name: include deploy_ssl_keys.yml
+ include_tasks: deploy_ssl_keys.yml
+ when: generate_crt | bool
+
+- name: include non-container/configure_iscsi.yml
+ include_tasks: non-container/configure_iscsi.yml
+ when:
+ - not containerized_deployment | bool
+ - not use_new_ceph_iscsi | bool
+
+- name: include non-container/postrequisites.yml
+ include_tasks: non-container/postrequisites.yml
+ when: not containerized_deployment | bool
+
+- name: include containerized.yml
+ include_tasks: containerized.yml
+ when: containerized_deployment | bool
--- /dev/null
+---
+- name: igw_gateway (tgt) | configure iscsi target (gateway)
+ igw_gateway:
+ mode: "target"
+ gateway_iqn: "{{ gateway_iqn }}"
+ gateway_ip_list: "{{ gateway_ip_list }}"
+ register: target
+
+- name: igw_lun | configure luns (create/map rbds and add to lio)
+ igw_lun:
+ pool: "{{ item.pool }}"
+ image: "{{ item.image }}"
+ size: "{{ item.size }}"
+ host: "{{ item.host }}"
+ state: "{{ item.state }}"
+ with_items: "{{ rbd_devices }}"
+ register: images
+
+- name: igw_gateway (map) | map luns to the iscsi target
+ igw_gateway:
+ mode: "map"
+ gateway_iqn: "{{ gateway_iqn }}"
+ gateway_ip_list: "{{ gateway_ip_list }}"
+ register: luns
+
+- name: igw_client | configure client connectivity
+ igw_client:
+ client_iqn: "{{ item.client }}"
+ image_list: "{{ item.image_list }}"
+ chap: "{{ item.chap }}"
+ state: "{{ item.status }}"
+ with_items: "{{ client_connections }}"
+ register: clients
--- /dev/null
+- name: start rbd-target-api and rbd-target-gw
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ masked: no
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
--- /dev/null
+---
+- name: red hat based systems tasks
+ when: ansible_facts['os_family'] == 'RedHat'
+ block:
+ - name: set_fact common_pkgs
+ set_fact:
+ common_pkgs:
+ - tcmu-runner
+ - targetcli
+
+ - name: set_fact base iscsi pkgs if new style ceph-iscsi
+ set_fact:
+ iscsi_base:
+ - ceph-iscsi
+ when: use_new_ceph_iscsi | bool
+
+ - name: set_fact base iscsi pkgs if using older ceph-iscsi-config
+ set_fact:
+ iscsi_base:
+ - ceph-iscsi-cli
+ - ceph-iscsi-config
+ when: not use_new_ceph_iscsi | bool
+
+ - name: when ceph_iscsi_config_dev is true
+ when:
+ - ceph_origin == 'repository'
+ - ceph_repository in ['dev', 'community']
+ - ceph_iscsi_config_dev | bool
+ block:
+ - name: get latest available build for tcmu-runner
+ uri:
+ url: "https://shaman.ceph.com/api/search/?status=ready&project=tcmu-runner&flavor=default&distros=centos/{{ ansible_facts['distribution_major_version'] }}/{{ ansible_facts['architecture'] }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
+ return_content: yes
+ run_once: true
+ register: latest_build_tcmu_runner
+
+ - name: fetch ceph red hat development repository for tcmu-runner
+ uri:
+ # Use the centos repo since we don't currently have a dedicated red hat repo
+ url: "{{ (latest_build_tcmu_runner.content | from_json)[0]['chacra_url'] }}repo"
+ return_content: yes
+ register: ceph_dev_yum_repo_tcmu_runner
+
+ - name: configure ceph red hat development repository for tcmu-runner
+ copy:
+ content: "{{ ceph_dev_yum_repo_tcmu_runner.content }}"
+ dest: '/etc/yum.repos.d/tcmu-runner-dev.repo'
+ owner: root
+ group: root
+ backup: yes
+
+ - name: get latest available build for ceph-iscsi
+ uri:
+ url: "https://shaman.ceph.com/api/search/?status=ready&project={{ item }}&flavor=default&distros=centos/{{ ansible_facts['distribution_major_version'] }}/noarch&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
+ return_content: yes
+ run_once: true
+ register: latest_build_ceph_iscsi
+ with_items: "{{ iscsi_base }}"
+
+ - name: fetch ceph red hat development repository for ceph-iscsi
+ uri:
+ # Use the centos repo since we don't currently have a dedicated red hat repo
+ url: "{{ (item.content | from_json)[0]['chacra_url'] }}repo"
+ return_content: yes
+ register: ceph_dev_yum_repo_ceph_iscsi
+ with_items: "{{ latest_build_ceph_iscsi.results }}"
+
+ - name: configure ceph red hat development repository for ceph-iscsi
+ copy:
+ content: "{{ item.content }}"
+ dest: '/etc/yum.repos.d/{{ item.item.item }}-dev.repo'
+ owner: root
+ group: root
+ backup: yes
+ with_items: '{{ ceph_dev_yum_repo_ceph_iscsi.results }}'
+
+ - name: ceph-iscsi stable repository
+ get_url:
+ url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo"
+ dest: /etc/yum.repos.d/ceph-iscsi.repo
+ force: true
+ register: result
+ until: result is succeeded
+ when: ceph_repository == 'community'
+
+ - name: install ceph iscsi package
+ package:
+ name: "{{ common_pkgs + iscsi_base }}"
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+
+- name: check the status of the target.service override
+ stat:
+ path: /etc/systemd/system/target.service
+ register: target
+
+- name: mask the target service - preventing manual start
+ systemd:
+ name: target
+ masked: yes
+ enabled: no
+ when:
+ - target.stat.exists
+ - not target.stat.islnk
+
+# Only start tcmu-runner, so configure_iscsi.yml can create disks.
+# We must start rbd-target-gw/api after configure_iscsi.yml to avoid
+# races where they are both trying to set up the same object during
+# a rolling update.
+- name: start tcmu-runner
+ service:
+ name: tcmu-runner
+ state: started
+ enabled: yes
+ masked: no
--- /dev/null
+---
+- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
+ template:
+ src: "{{ role_path }}/templates/{{ item }}.service.j2"
+ dest: /etc/systemd/system/{{ item }}.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ with_items:
+ - tcmu-runner
+ - rbd-target-gw
+ - rbd-target-api
+ notify:
+ - restart ceph tcmu-runner
+ - restart ceph rbd-target-api-gw
--- /dev/null
+# This is seed configuration used by the ceph_iscsi_config modules
+# when handling configuration tasks for iscsi gateway(s)
+#
+# {{ ansible_managed }}
+
+[config]
+cluster_name = {{ cluster }}
+
+pool = {{ iscsi_pool_name }}
+
+# API settings.
+# The API supports a number of options that allow you to tailor it to your
+# local environment. If you want to run the API under https, you will need to
+# create cert/key files that are compatible with each iSCSI gateway node, i.e.
+# not locked to a specific node. SSL cert and key files *must* be called
+# 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory
+# on *each* gateway node. With the SSL files in place, you can use 'api_secure = true'
+# to switch to https mode.
+
+# To support the API, the bare minimum settings are:
+api_secure = {{ api_secure }}
+
+# Optional settings related to the CLI/API service
+api_user = {{ api_user }}
+api_password = {{ api_password }}
+api_port = {{ api_port }}
+loop_delay = {{ loop_delay }}
+{% if trusted_ip_list is defined %}
+trusted_ip_list = {{ trusted_ip_list }}
+{% endif %}
\ No newline at end of file
--- /dev/null
+[Unit]
+Description=RBD Target API Service
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage rbd-target-api
+ExecStartPre=-/usr/bin/mkdir -p /var/log/rbd-target-api
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-api
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-api
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_rbd_target_api_docker_memory_limit }} \
+ --cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
+ -v /etc/localtime:/etc/localtime:ro \
+ --privileged \
+ --net=host \
+ -v /dev:/dev \
+ -v /dev/log:/dev/log \
+ -v /lib/modules:/lib/modules \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/log/rbd-target-api:/var/log/rbd-target-api:z \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=RBD_TARGET_API \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ --name=rbd-target-api \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-api
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+[Unit]
+Description=RBD Target Gateway Service
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage rbd-target-gw
+ExecStartPre=-/usr/bin/mkdir -p /var/log/rbd-target-gw
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-gw
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-gw
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
+ --cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
+ -v /etc/localtime:/etc/localtime:ro \
+ --privileged \
+ --net=host \
+ -v /dev:/dev \
+ -v /dev/log:/dev/log \
+ -v /lib/modules:/lib/modules \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/log/rbd-target-gw:/var/log/rbd-target-gw:z \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=RBD_TARGET_GW \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ --name=rbd-target-gw \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-gw
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+[Unit]
+Description=TCMU Runner
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage tcmu-runner
+ExecStartPre=-/usr/bin/mkdir -p /var/log/tcmu-runner
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop tcmu-runner
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm tcmu-runner
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_tcmu_runner_docker_memory_limit }} \
+ --cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
+ -v /etc/localtime:/etc/localtime:ro \
+ --privileged \
+ --net=host \
+ -v /dev:/dev \
+ -v /lib/modules:/lib/modules \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/log/tcmu-runner:/var/log/tcmu-runner:z \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=TCMU_RUNNER \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ --name=tcmu-runner \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop tcmu-runner
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-mds
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though MDS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+copy_admin_key: false
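+# For example, to distribute the admin key anyway you could set the following
+# in your host or group vars (an illustrative override, not a default):
+# copy_admin_key: true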
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
+ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_mds_docker_cpu_limit: 4
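+# As an illustration only, a host or group vars override capping the MDS
+# container at 4 GB of memory and 2 CPUs could look like:
+# ceph_mds_docker_memory_limit: 4096m
+# ceph_mds_docker_cpu_limit: 2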
+
+# we currently force MDS_NAME to the hostname because of a bug in ceph-docker
+# fix here: https://github.com/ceph/ceph-docker/pull/770
+# this will go away soon.
+ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
+ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_mds_systemd_overrides will override the systemd settings
+# for the ceph-mds services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_mds_systemd_overrides:
+# Service:
+# PrivateDevices: False
--- /dev/null
+[Unit]
+Description=ceph target allowing to start/stop all ceph-mds@.service instances at once
+PartOf=ceph.target
+After=ceph-mon.target
+Before=ceph.target
+Wants=ceph.target ceph-mon.target
+
+[Install]
+WantedBy=multi-user.target ceph.target
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph Metadata
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create bootstrap-mds and mds directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_directories_mode }}"
+ with_items:
+ - /var/lib/ceph/bootstrap-mds/
+ - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}
+
+- name: get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _mds_keys
+ with_items:
+ - { name: "client.bootstrap-mds", path: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true }
+ - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ when:
+ - cephx | bool
+ - item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: copy ceph key(s) if needed
+ copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _mds_keys.results }}"
+ when:
+ - cephx | bool
+ - item.item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
--- /dev/null
+---
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: enable ceph-mds.target
+ service:
+ name: ceph-mds.target
+ enabled: yes
+ daemon_reload: yes
+ when: containerized_deployment | bool
+
+- name: systemd start mds container
+ systemd:
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+
+- name: wait for mds socket to exist
+ command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
+ changed_when: false
+ register: multi_mds_socket
+ retries: 5
+ delay: 15
+ until: multi_mds_socket.rc == 0
--- /dev/null
+---
+- import_role:
+ name: ceph-facts
+ tasks_from: get_def_crush_rule_name.yml
+
+- name: create filesystem pools
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_num: "{{ item.pg_num | default(omit) }}"
+ pgp_num: "{{ item.pgp_num | default(omit) }}"
+ size: "{{ item.size | default(omit) }}"
+ min_size: "{{ item.min_size | default(omit) }}"
+ pool_type: "{{ item.type | default('replicated') }}"
+ rule_name: "{{ item.rule_name | default(omit) }}"
+ erasure_profile: "{{ item.erasure_profile | default(omit) }}"
+ pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
+ target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
+ with_items: "{{ cephfs_pools }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: create ceph filesystem
+ ceph_fs:
+ name: "{{ cephfs }}"
+ cluster: "{{ cluster }}"
+ data: "{{ cephfs_data_pool.name }}"
+ metadata: "{{ cephfs_metadata_pool.name }}"
+ max_mds: "{{ mds_max_mds if not rolling_update | bool else omit }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
--- /dev/null
+---
+- name: include create_mds_filesystems.yml
+ include_tasks: create_mds_filesystems.yml
+ when:
+ - inventory_hostname == groups[mds_group_name] | first
+ - not rolling_update | bool
+
+- name: include common.yml
+ include_tasks: common.yml
+
+- name: include non_containerized.yml
+ include_tasks: non_containerized.yml
+ when: not containerized_deployment | bool
+
+- name: include containerized.yml
+ include_tasks: containerized.yml
+ when: containerized_deployment | bool
--- /dev/null
+---
+- name: install ceph mds for debian
+ apt:
+ name: ceph-mds
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+ when:
+ - mds_group_name in group_names
+ - ansible_facts['os_family'] == 'Debian'
+ register: result
+ until: result is succeeded
+
+- name: install ceph-mds package on redhat or SUSE/openSUSE
+ package:
+ name: "ceph-mds"
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ when:
+ - mds_group_name in group_names
+ - ansible_facts['os_family'] in ['Suse', 'RedHat']
+
+- name: create mds keyring
+ ceph_key:
+ name: "mds.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ user: client.bootstrap-mds
+ user_key: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
+ caps:
+ mon: "allow profile mds"
+ mds: "allow"
+ osd: "allow rwx"
+ dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
+ import_key: false
+ owner: ceph
+ group: ceph
+ mode: "{{ ceph_keyring_permissions }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when: cephx | bool
+
+- name: ensure systemd service override directory exists
+ file:
+ state: directory
+ path: "/etc/systemd/system/ceph-mds@.service.d/"
+ when:
+ - ceph_mds_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: add ceph-mds systemd service overrides
+ config_template:
+ src: "ceph-mds.service.d-overrides.j2"
+ dest: "/etc/systemd/system/ceph-mds@.service.d/ceph-mds-systemd-overrides.conf"
+ config_overrides: "{{ ceph_mds_systemd_overrides | default({}) }}"
+ config_type: "ini"
+ when:
+ - ceph_mds_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: start and add the metadata service to the init sequence
+ service:
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
+ state: started
+ enabled: yes
+ masked: no
+ changed_when: false
--- /dev/null
+---
+- name: generate systemd unit file
+ template:
+ src: "{{ role_path }}/templates/ceph-mds.service.j2"
+ dest: /etc/systemd/system/ceph-mds@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph mdss
+
+- name: generate systemd ceph-mds target file
+ copy:
+ src: ceph-mds.target
+ dest: /etc/systemd/system/ceph-mds.target
+ when: containerized_deployment | bool
\ No newline at end of file
--- /dev/null
+# {{ ansible_managed }}
--- /dev/null
+[Unit]
+Description=Ceph MDS
+PartOf=ceph-mds.target
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_mds_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_mds_docker_cpu_limit|int %}
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_facts['hostname'] }}
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_facts['hostname'] }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_mds_docker_memory_limit }} \
+ --cpus={{ cpu_limit }} \
+ -v /var/lib/ceph:/var/lib/ceph:z \
+ -v /etc/ceph:/etc/ceph:z \
+ -v /var/run/ceph:/var/run/ceph:z \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /var/log/ceph:/var/log/ceph:z \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=MDS \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+ {{ ceph_mds_docker_extra_env }} \
+ --name=ceph-mds-{{ ansible_facts['hostname'] }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=ceph.target
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-mgr
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+##########
+# GLOBAL #
+##########
+# Even though MGR nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on MGR nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+copy_admin_key: false
+mgr_secret: 'mgr_secret'
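+# Leaving 'mgr_secret' at this placeholder value lets the mgr keyring be
+# generated automatically; to pin the key instead, set it to a pre-generated
+# secret, for example (illustrative, substitute your own value):
+# mgr_secret: "<output of `ceph-authtool --gen-print-key`>"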
+
+
+###########
+# MODULES #
+###########
+# Ceph mgr modules to enable. To view the list of available modules, see: http://docs.ceph.com/docs/CEPH_VERSION/mgr/
+# and replace CEPH_VERSION with your current Ceph version, e.g. 'mimic'
+ceph_mgr_modules: []
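+# For example (illustrative; 'dashboard' and 'prometheus' are also appended
+# automatically by this role when dashboard_enabled is true):
+# ceph_mgr_modules:
+#   - dashboard
+#   - prometheus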
+
+############
+# PACKAGES #
+############
+# Ceph mgr packages to install: ceph-mgr plus extra module packages.
+ceph_mgr_packages:
+ - ceph-mgr
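+# For example, to also install an extra module package (illustrative; some
+# packages such as ceph-mgr-dashboard are appended automatically by the role):
+# ceph_mgr_packages:
+#   - ceph-mgr
+#   - ceph-mgr-diskprediction-local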
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
+ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_mgr_docker_cpu_limit: 1
+
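+# Use this variable to add extra env configuration to run your mgr container.
+# For example (the variable name below is just a placeholder):
+# ceph_mgr_docker_extra_env: -e SOME_VARIABLE=some_value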
+ceph_mgr_docker_extra_env:
+ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_mgr_systemd_overrides will override the systemd settings
+# for the ceph-mgr services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_mgr_systemd_overrides:
+# Service:
+# PrivateDevices: False
--- /dev/null
+[Unit]
+Description=ceph target allowing start/stop of all ceph-mgr@.service instances at once
+PartOf=ceph.target
+After=ceph-mon.target
+Before=ceph.target
+Wants=ceph.target ceph-mon.target
+
+[Install]
+WantedBy=multi-user.target ceph.target
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph Manager
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create mgr directory
+ file:
+ path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_directories_mode }}"
+
+- name: fetch ceph mgr keyring
+ ceph_key:
+ name: "mgr.{{ ansible_facts['hostname'] }}"
+ caps:
+ mon: allow profile mgr
+ osd: allow *
+ mds: allow *
+ cluster: "{{ cluster }}"
+ secret: "{{ (mgr_secret != 'mgr_secret') | ternary(mgr_secret, omit) }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0400"
+ dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ when: groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: create and copy keyrings
+ when: groups.get(mgr_group_name, []) | length > 0
+ block:
+ - name: create ceph mgr keyring(s) on a mon node
+ ceph_key:
+ name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ caps:
+ mon: allow profile mgr
+ osd: allow *
+ mds: allow *
+ cluster: "{{ cluster }}"
+ secret: "{{ (mgr_secret != 'mgr_secret') | ternary(mgr_secret, omit) }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0400"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ groups.get(mgr_group_name, []) }}"
+ run_once: True
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: set_fact _mgr_keys
+ set_fact:
+ _mgr_keys:
+ - { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': copy_admin_key }
+ - { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true }
+
+ - name: get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _mgr_keys
+ with_items: "{{ _mgr_keys }}"
+ delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}"
+ when:
+ - cephx | bool
+ - item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _mgr_keys.results }}"
+ when:
+ - cephx | bool
+ - item is not skipped
+ - item.item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: set mgr key permissions
+ file:
+ path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ when: cephx | bool
+
+- name: append dashboard modules to ceph_mgr_modules
+ set_fact:
+ ceph_mgr_modules: "{{ ceph_mgr_modules | union(['dashboard', 'prometheus']) }}"
+ when: dashboard_enabled | bool
--- /dev/null
+---
+- name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ run_once: true
+ when: containerized_deployment | bool
+
+- name: include common.yml
+ include_tasks: common.yml
+
+- name: include pre_requisite.yml
+ include_tasks: pre_requisite.yml
+ when: not containerized_deployment | bool
+
+- name: include start_mgr.yml
+ include_tasks: start_mgr.yml
+
+- name: include mgr_modules.yml
+ include_tasks: mgr_modules.yml
+ when:
+ - ceph_mgr_modules | length > 0
+ - ((groups[mgr_group_name] | default([]) | length == 0 and inventory_hostname == groups[mon_group_name] | last) or
+ (groups[mgr_group_name] | default([]) | length > 0 and inventory_hostname == groups[mgr_group_name] | last))
--- /dev/null
+---
+- name: wait for all mgr to be up
+ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
+ register: mgr_dump
+ retries: 30
+ delay: 5
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ until:
+ - mgr_dump.rc == 0
+ - (mgr_dump.stdout | from_json).available | bool
+ when: not ansible_check_mode
+
+- name: get enabled modules from ceph-mgr
+ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
+ check_mode: no
+ changed_when: false
+ register: _ceph_mgr_modules
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+- name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict)
+ set_fact:
+ _ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}"
+
+- name: set _disabled_ceph_mgr_modules fact
+ set_fact:
+ _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
+
+- name: disable ceph mgr enabled modules
+ ceph_mgr_module:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ state: disable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ _ceph_mgr_modules.get('enabled_modules', []) }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: item not in ceph_mgr_modules
+
+- name: add modules to ceph-mgr
+ ceph_mgr_module:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ state: enable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ ceph_mgr_modules }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
--- /dev/null
+---
+- name: set_fact ceph_mgr_packages for sso
+ set_fact:
+ ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}"
+ when:
+ - dashboard_enabled | bool
+ - ansible_facts['distribution'] == 'RedHat'
+
+- name: set_fact ceph_mgr_packages for dashboard
+ set_fact:
+ ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-dashboard']) }}"
+ when: dashboard_enabled | bool
+
+- name: set_fact ceph_mgr_packages for non el7 distribution
+ set_fact:
+ ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-diskprediction-local']) }}"
+ when:
+ - ansible_facts['os_family'] != 'RedHat'
+ - ansible_facts['distribution_major_version'] | int != 7
+
+- name: install ceph-mgr packages on RedHat or SUSE
+ package:
+ name: '{{ ceph_mgr_packages }}'
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ when: ansible_facts['os_family'] in ['RedHat', 'Suse']
+
+- name: install ceph-mgr packages for debian
+ apt:
+ name: '{{ ceph_mgr_packages }}'
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+ register: result
+ until: result is succeeded
+ when: ansible_facts['os_family'] == 'Debian'
--- /dev/null
+---
+- name: ensure systemd service override directory exists
+ file:
+ state: directory
+ path: "/etc/systemd/system/ceph-mgr@.service.d/"
+ when:
+ - ceph_mgr_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: add ceph-mgr systemd service overrides
+ config_template:
+ src: "ceph-mgr.service.d-overrides.j2"
+ dest: "/etc/systemd/system/ceph-mgr@.service.d/ceph-mgr-systemd-overrides.conf"
+ config_overrides: "{{ ceph_mgr_systemd_overrides | default({}) }}"
+ config_type: "ini"
+ when:
+ - ceph_mgr_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+ when: containerized_deployment | bool
+
+- name: enable ceph-mgr.target
+ service:
+ name: ceph-mgr.target
+ enabled: yes
+ daemon_reload: yes
+ when: containerized_deployment | bool
+
+- name: systemd start mgr
+ systemd:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
--- /dev/null
+---
+- name: generate systemd unit file
+ template:
+ src: "{{ role_path }}/templates/ceph-mgr.service.j2"
+ dest: /etc/systemd/system/ceph-mgr@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph mgrs
+
+- name: generate systemd ceph-mgr target file
+ copy:
+ src: ceph-mgr.target
+ dest: /etc/systemd/system/ceph-mgr.target
+ when: containerized_deployment | bool
\ No newline at end of file
--- /dev/null
+# {{ ansible_managed }}
--- /dev/null
+[Unit]
+Description=Ceph Manager
+PartOf=ceph-mgr.target
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_facts['hostname'] }}
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_facts['hostname'] }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_mgr_docker_memory_limit }} \
+ --cpus={{ ceph_mgr_docker_cpu_limit }} \
+ -v /var/lib/ceph:/var/lib/ceph:z,rshared \
+ -v /etc/ceph:/etc/ceph:z \
+ -v /var/run/ceph:/var/run/ceph:z \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /var/log/ceph:/var/log/ceph:z \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=MGR \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+ {{ ceph_mgr_docker_extra_env }} \
+ --name=ceph-mgr-{{ ansible_facts['hostname'] }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=ceph.target
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-mon
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+mon_group_name: mons
+
+# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
+monitor_secret: "{{ monitor_keyring.stdout }}"
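+# For a non-Vagrant deployment you would typically pin explicit values instead,
+# for example (placeholders below, generate your own):
+# fsid: "<output of `uuidgen`>"
+# monitor_secret: "<output of `ceph-authtool --gen-print-key`>"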
+admin_secret: 'admin_secret'
+
+# Secure your cluster
+# This will set the following flags on all the pools:
+# * nosizechange
+# * nopgchange
+# * nodelete
+
+secure_cluster: false
+secure_cluster_flags:
+ - nopgchange
+ - nodelete
+ - nosizechange
+
+client_admin_ceph_authtool_cap:
+ mon: allow *
+ osd: allow *
+ mds: allow *
+ mgr: allow *
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
+ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_mon_docker_cpu_limit: 1
+ceph_mon_container_listen_port: 3300
+
+# Use this variable to add extra env configuration to run your mon container.
+# If you want to set a custom admin keyring you can set this variable as follows:
+# ceph_mon_docker_extra_env: -e ADMIN_SECRET={{ admin_secret }}
+ceph_mon_docker_extra_env:
+mon_docker_privileged: false
+mon_docker_net_host: true
+ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_mon_systemd_overrides will override the systemd settings
+# for the ceph-mon services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_mon_systemd_overrides:
+# Service:
+# PrivateDevices: False
--- /dev/null
+[Unit]
+Description=ceph target allowing start/stop of all ceph-mon@.service instances at once
+PartOf=ceph.target
+Before=ceph.target
+Wants=ceph.target
+
+[Install]
+WantedBy=multi-user.target ceph.target
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph Monitor
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: waiting for the monitor(s) to form the quorum...
+ command: >
+ {{ container_exec_cmd }}
+ ceph
+ --cluster {{ cluster }}
+ daemon mon.{{ ansible_facts['hostname'] }}
+ mon_status
+ --format json
+ register: ceph_health_raw
+ run_once: true
+ until: >
+ (ceph_health_raw.stdout | length > 0) and (ceph_health_raw.stdout | default('{}') | from_json)['state'] in ['leader', 'peon']
+ retries: "{{ handler_health_mon_check_retries }}"
+ delay: "{{ handler_health_mon_check_delay }}"
+ changed_when: false
+ when: not ansible_check_mode
+
+- name: fetch ceph initial keys
+ ceph_key:
+ state: fetch_initial_keys
+ cluster: "{{ cluster }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0400"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ CEPH_ROLLING_UPDATE: "{{ rolling_update }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when:
+ - cephx | bool
--- /dev/null
+---
+- name: cephx related tasks
+ when: cephx | bool
+ block:
+ - name: check if monitor initial keyring already exists
+ ceph_key:
+ name: mon.
+ cluster: "{{ cluster }}"
+ user: mon.
+ user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_facts']['hostname'] }}/keyring"
+ output_format: json
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: initial_mon_key
+ run_once: True
+ delegate_to: "{{ running_mon }}"
+ failed_when: initial_mon_key.rc not in [0, 2]
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when: running_mon is defined
+
+ - name: generate monitor initial keyring
+ ceph_key:
+ state: generate_secret
+ register: monitor_keyring
+ delegate_to: localhost
+ become: false
+ run_once: true
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when:
+ - initial_mon_key is skipped
+ or
+ initial_mon_key is not succeeded
+
+ - name: set_fact _initial_mon_key_success
+ set_fact: # when initial_mon_key is registered above, `rc: 2` is considered success.
+ _initial_mon_key_success: "{{ initial_mon_key is not skipped and initial_mon_key.rc == 0 }}"
+
+ - name: get initial keyring when it already exists
+ set_fact:
+ monitor_keyring: "{{ (initial_mon_key.stdout | from_json)[0]['key'] if _initial_mon_key_success | bool else monitor_keyring.stdout }}"
+ when: initial_mon_key.stdout|default('')|length > 0 or monitor_keyring is not skipped
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: create monitor initial keyring
+ ceph_key:
+ name: mon.
+ dest: "/var/lib/ceph/tmp/"
+ secret: "{{ monitor_keyring }}"
+ cluster: "{{ cluster }}"
+ caps:
+ mon: allow *
+ import_key: False
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0400"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+ - name: copy the initial key in /etc/ceph (for containers)
+ copy:
+ src: /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
+ dest: /etc/ceph/{{ cluster }}.mon.keyring
+ remote_src: true
+ when: containerized_deployment | bool
+
+- name: create monitor directory
+ file:
+ path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_directories_mode }}"
+
+# We don't do the recursion in the task above to avoid setting `mode` (which
+# defaults to 0755) on files.
+#
+# This is only needed when upgrading from older versions of Ceph that used to
+# run as `root` (https://github.com/ceph/ceph-ansible/issues/1635).
+- name: recursively fix ownership of monitor directory
+ file:
+ path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ recurse: true
+
+- name: create custom admin keyring
+ ceph_key:
+ name: client.admin
+ secret: "{{ admin_secret }}"
+ caps: "{{ client_admin_ceph_authtool_cap }}"
+ import_key: False
+ cluster: "{{ cluster }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0400"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: create_custom_admin_secret
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when:
+ - cephx | bool
+ - admin_secret != 'admin_secret'
+
+- name: set_fact ceph-authtool container command
+ set_fact:
+ ceph_authtool_cmd: "{{ container_binary + ' run --net=host --rm -v /var/lib/ceph:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-authtool ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment | bool else 'ceph-authtool' }}"
+
+- name: import admin keyring into mon keyring
+ command: >
+ {{ ceph_authtool_cmd }}
+ /var/lib/ceph/tmp/{{ cluster }}.mon..keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
+ when:
+ - not create_custom_admin_secret.get('skipped')
+ - cephx | bool
+ - admin_secret != 'admin_secret'
+
+- name: set_fact ceph-mon container command
+ set_fact:
+ ceph_mon_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-mon ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' +ceph_client_docker_image_tag if containerized_deployment | bool else 'ceph-mon' }}"
+
+- name: ceph monitor mkfs with keyring
+ command: >
+ {{ ceph_mon_cmd }}
+ --cluster {{ cluster }}
+ --setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ --setgroup "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ --mkfs
+ -i {{ monitor_name }}
+ --fsid {{ fsid }}
+ --keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
+ args:
+ creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
+ when: cephx | bool
+
+- name: ceph monitor mkfs without keyring
+ command: >
+ {{ ceph_mon_cmd }}
+ --cluster {{ cluster }}
+ --setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ --setgroup "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ --mkfs
+ -i {{ monitor_name }}
+ --fsid {{ fsid }}
+ args:
+ creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
+ when: not cephx | bool
--- /dev/null
+---
+- name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
+ when: containerized_deployment | bool
+
+- name: include deploy_monitors.yml
+ include_tasks: deploy_monitors.yml
+ when:
+ # we test for both container and non-container
+ - (mon_socket is defined and mon_socket.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0)
+ - not switch_to_containers | default(False) | bool
+
+- name: include start_monitor.yml
+ include_tasks: start_monitor.yml
+
+- name: include_tasks ceph_keys.yml
+ include_tasks: ceph_keys.yml
+ when: not switch_to_containers | default(False) | bool
+
+- name: include secure_cluster.yml
+ include_tasks: secure_cluster.yml
+ when:
+ - secure_cluster | bool
+ - inventory_hostname == groups[mon_group_name] | first
--- /dev/null
+---
+- name: collect all the pools
+ command: >
+ {{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
+ changed_when: false
+ register: ceph_pools
+ check_mode: no
+
+- name: secure the cluster
+ command: >
+ {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
+ changed_when: false
+ with_nested:
+ - "{{ ceph_pools.stdout_lines|default([]) }}"
+ - "{{ secure_cluster_flags }}"
--- /dev/null
+---
+- name: ensure systemd service override directory exists
+ file:
+ state: directory
+ path: "/etc/systemd/system/ceph-mon@.service.d/"
+ when:
+ - not containerized_deployment | bool
+ - ceph_mon_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: add ceph-mon systemd service overrides
+ config_template:
+ src: "ceph-mon.service.d-overrides.j2"
+ dest: "/etc/systemd/system/ceph-mon@.service.d/ceph-mon-systemd-overrides.conf"
+ config_overrides: "{{ ceph_mon_systemd_overrides | default({}) }}"
+ config_type: "ini"
+ when:
+ - not containerized_deployment | bool
+ - ceph_mon_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+ when: containerized_deployment | bool
+
+- name: start the monitor service
+ systemd:
+ name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }}
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
--- /dev/null
+---
+- name: generate systemd unit file for mon container
+ template:
+ src: "{{ role_path }}/templates/ceph-mon.service.j2"
+ dest: /etc/systemd/system/ceph-mon@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph mons
+
+- name: generate systemd ceph-mon target file
+ copy:
+ src: ceph-mon.target
+ dest: /etc/systemd/system/ceph-mon.target
+ when: containerized_deployment | bool
+
+- name: enable ceph-mon.target
+ service:
+ name: ceph-mon.target
+ enabled: yes
+ daemon_reload: yes
+ when: containerized_deployment | bool
\ No newline at end of file
--- /dev/null
+# {{ ansible_managed }}
--- /dev/null
+[Unit]
+Description=Ceph Monitor
+PartOf=ceph-mon.target
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mon-%i
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mon-%i
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mon-%i
+ExecStartPre=/bin/sh -c '"$(command -v mkdir)" -p /etc/ceph /var/lib/ceph/mon'
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_mon_docker_memory_limit }} \
+ --cpus={{ ceph_mon_docker_cpu_limit }} \
+ --security-opt label=disable \
+ -v /var/lib/ceph:/var/lib/ceph:rshared \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/run/ceph:/var/run/ceph \
+ -v /etc/localtime:/etc/localtime \
+ -v /var/log/ceph:/var/log/ceph \
+{% if ansible_facts['os_family'] == 'RedHat' -%}
+ -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted \
+{% endif -%}
+{% if mon_docker_privileged | bool -%}
+ --privileged \
+{% endif -%}
+{% if mon_docker_net_host | bool -%}
+ --net=host \
+{% endif -%}
+ -e IP_VERSION={{ ip_version[-1:] }} \
+ -e MON_IP={{ _current_monitor_address }} \
+ -e CLUSTER={{ cluster }} \
+ -e FSID={{ fsid }} \
+ -e MON_PORT={{ ceph_mon_container_listen_port }} \
+ -e CEPH_PUBLIC_NETWORK={{ public_network | regex_replace(' ', '') }} \
+ -e CEPH_DAEMON=MON \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+ {{ ceph_mon_docker_extra_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-mon-%i
+{% endif %}
+ExecStopPost=-/bin/rm -f /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=ceph.target
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2016] [Red Hat, Inc.]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-nfs
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though NFS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on NFS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory.
+copy_admin_key: false
+
+# Whether the docker container or systemd service should be enabled
+# and started. It's useful to set this to false if the nfs-ganesha
+# service is managed by pacemaker.
+ceph_nfs_enable_service: true
+
+# The ceph-nfs systemd service uses ansible's hostname as an instance id,
+# so the service name is ceph-nfs@{{ ansible_facts['hostname'] }}. This is not
+# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
+# such a case it's better to use a constant instance id instead, which
+# can be set via 'ceph_nfs_service_suffix'.
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
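+# As a hedged illustration (the value below is hypothetical, not a default),
+# pinning the suffix to a constant keeps the systemd instance name stable
+# when pacemaker moves the service between hosts:
+# ceph_nfs_service_suffix: "nfs-ha"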
+
+######################
+# NFS Ganesha Config #
+######################
+ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
+ceph_nfs_dynamic_exports: false
+# If set to true, rados is used to store ganesha exports
+# and client session information. This is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover.
+ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLs
+ceph_nfs_rados_export_index: "ganesha-export-index"
+# Address the ganesha service should listen on. By default ganesha listens on all
+# addresses. (Note: ganesha ignores this parameter in the current version due to
+# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
+# ceph_nfs_bind_addr: 0.0.0.0
+
+# If set to true, then ganesha's attribute and directory caching is disabled
+# as much as possible. Currently, ganesha caches by default.
+# When using ganesha as CephFS's gateway, it is recommended to turn off
+# ganesha's caching as the libcephfs clients also cache the same information.
+# Note: Irrespective of this option's setting, ganesha's caching is disabled
+# when the 'nfs_file_gw' option is set to true.
+ceph_nfs_disable_caching: false
+
+# This is the file ganesha will use to control NFSv4 ID mapping
+ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf"
+
+# idmap configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# Example:
+# idmap_conf_overrides:
+# General:
+# Domain: foo.domain.net
+idmap_conf_overrides: {}
+
+####################
+# FSAL Ceph Config #
+####################
+ceph_nfs_ceph_export_id: 20133
+ceph_nfs_ceph_pseudo_path: "/cephfile"
+ceph_nfs_ceph_protocols: "3,4"
+ceph_nfs_ceph_access_type: "RW"
+ceph_nfs_ceph_user: "admin"
+ceph_nfs_ceph_squash: "Root_Squash"
+ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p"
+
+###################
+# FSAL RGW Config #
+###################
+ceph_nfs_rgw_export_id: 20134
+ceph_nfs_rgw_pseudo_path: "/cephobject"
+ceph_nfs_rgw_protocols: "3,4"
+ceph_nfs_rgw_access_type: "RW"
+ceph_nfs_rgw_user: "cephnfs"
+ceph_nfs_rgw_squash: "Root_Squash"
+ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
+# Note: keys are optional and can be generated, but not on containerized
+# deployments, where they must be configured.
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and
+# must be in the correct ganesha.conf format seen here:
+# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
+#
+# Example:
+#CACHEINODE {
+#  Entries_HWMark = 100000;
+#}
+#
+#ganesha_core_param_overrides:
+#ganesha_ceph_export_overrides:
+#ganesha_rgw_export_overrides:
+#ganesha_rgw_section_overrides:
+#ganesha_log_overrides:
+#ganesha_conf_overrides: |
+#  CACHEINODE {
+#    Entries_HWMark = 100000;
+#  }
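+#
+# As an additional illustrative sketch (the value below is an assumption, not
+# a default), the other override variables use the same multiline string form
+# and are appended inside their respective ganesha.conf blocks, e.g.:
+#ganesha_log_overrides: |
+#  Default_Log_Level = INFO;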
+
+##########
+# DOCKER #
+##########
+
+ceph_docker_image: "ceph/daemon"
+ceph_docker_image_tag: latest
+ceph_nfs_docker_extra_env:
+ceph_config_keys: [] # DON'T TOUCH ME
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Daniel Gryniewicz
+ description: Installs Ceph NFS Gateway
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
+ radosgw_user:
+ name: "{{ ceph_nfs_rgw_user }}"
+ cluster: "{{ cluster }}"
+ display_name: "RGW NFS User"
+ access_key: "{{ ceph_nfs_rgw_access_key | default(omit) }}"
+ secret_key: "{{ ceph_nfs_rgw_secret_key | default(omit) }}"
+ run_once: true
+ register: rgw_nfs_user
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: nfs_obj_gw | bool
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key
+ set_fact:
+ ceph_nfs_rgw_access_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['access_key'] }}"
+ ceph_nfs_rgw_secret_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['secret_key'] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: nfs_obj_gw | bool
--- /dev/null
+---
+# global/common requirement
+- name: stop nfs server service
+ systemd:
+ name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+
+- name: include pre_requisite_non_container.yml
+ include_tasks: pre_requisite_non_container.yml
+ when: not containerized_deployment | bool
+
+- name: include pre_requisite_container.yml
+ include_tasks: pre_requisite_container.yml
+ when: containerized_deployment | bool
+
+- name: include create_rgw_nfs_user.yml
+ import_tasks: create_rgw_nfs_user.yml
+ when: groups.get(mon_group_name, []) | length > 0
+
+- name: install nfs-ganesha-selinux on RHEL 8
+ package:
+ name: nfs-ganesha-selinux
+ state: present
+ register: result
+ until: result is succeeded
+ when:
+ - not containerized_deployment | bool
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ - ansible_facts['os_family'] == 'RedHat'
+ - ansible_facts['distribution_major_version'] == '8'
+
+# NOTE (leseb): workaround for issues with ganesha and librgw
+- name: add ganesha_t to permissive domain
+ selinux_permissive:
+ name: ganesha_t
+ permissive: true
+ failed_when: false
+ when:
+ - not containerized_deployment | bool
+ - ansible_facts['os_family'] == 'RedHat'
+ - ansible_facts['selinux']['status'] == 'enabled'
+
+- name: nfs with external ceph cluster related tasks
+ when:
+ - groups.get(mon_group_name, []) | length == 0
+ - ceph_nfs_ceph_user is defined
+ block:
+ - name: create keyring directory
+ file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ item }}"
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0755"
+ with_items:
+ - "{{ ceph_nfs_ceph_user }}"
+ - "{{ ansible_facts['hostname'] }}"
+
+ - name: set_fact rgw_client_name
+ set_fact:
+ rgw_client_name: "client.rgw.{{ ceph_nfs_ceph_user }}"
+
+ - name: get client cephx keys
+ copy:
+ dest: "{{ item.1 }}"
+ content: "{{ item.0.content | b64decode }}"
+ mode: "{{ item.0.item.get('mode', '0600') }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ with_nested:
+ - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}"
+ - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"]
+ when:
+ - not item.0.get('skipped', False)
+ - item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: include start_nfs.yml
+ import_tasks: start_nfs.yml
--- /dev/null
+---
+- name: keyring related tasks
+ block:
+ - name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ run_once: true
+
+ - name: get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _rgw_keys
+ with_items:
+ - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
+ - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ when:
+ - cephx | bool
+ - item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _rgw_keys.results }}"
+ when:
+ - cephx | bool
+ - item.item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when: groups.get(mon_group_name, []) | length > 0
+
+- name: dbus related tasks
+ block:
+ - name: get file
+ command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf"
+ register: dbus_ganesha_file
+ run_once: true
+ changed_when: false
+
+ - name: create dbus service file
+ copy:
+ content: "{{ dbus_ganesha_file.stdout }}"
+ dest: /etc/dbus-1/system.d/org.ganesha.nfsd.conf
+ owner: "root"
+ group: "root"
+ mode: "0644"
+
+ - name: reload dbus configuration
+ command: "killall -SIGHUP dbus-daemon"
+ when: ceph_nfs_dynamic_exports | bool
--- /dev/null
+---
+- name: include red hat based system related tasks
+ include_tasks: pre_requisite_non_container_red_hat.yml
+ when: ansible_facts['os_family'] == 'RedHat'
+
+- name: include debian based system related tasks
+ include_tasks: pre_requisite_non_container_debian.yml
+ when: ansible_facts['os_family'] == 'Debian'
+
+- name: install nfs rgw/cephfs gateway - SUSE/openSUSE
+ zypper:
+ name: "{{ item.name }}"
+ disable_gpg_check: yes
+ with_items:
+ - { name: 'nfs-ganesha-rgw', install: "{{ nfs_obj_gw }}" }
+ - { name: 'radosgw', install: "{{ nfs_obj_gw }}" }
+ - { name: 'nfs-ganesha-ceph', install: "{{ nfs_file_gw }}" }
+ when:
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ - ceph_repository != 'rhcs'
+ - ansible_facts['os_family'] == 'Suse'
+ - item.install | bool
+ register: result
+ until: result is succeeded
+
+# NOTE (leseb): we use root:ceph for permissions since ganesha
+# does not have the right selinux context to read ceph directories.
+- name: create rados gateway and ganesha directories
+ file:
+ path: "{{ item.name }}"
+ state: directory
+ owner: "{{ item.owner | default('ceph') }}"
+ group: "{{ item.group | default('ceph') }}"
+ mode: "{{ ceph_directories_mode }}"
+ with_items:
+ - { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" }
+ - { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" }
+ - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" }
+ - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
+ - { name: "/var/log/ceph", create: true }
+ - { name: "/var/log/ganesha", create: true, owner: root, group: root }
+ - { name: "/var/run/ceph", create: true }
+ when: item.create | bool
+
+- name: cephx related tasks
+ when:
+ - cephx | bool
+ - groups.get(mon_group_name, []) | length > 0
+ block:
+ - name: get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ register: _rgw_keys
+ with_items:
+ - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
+ - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ when:
+ - cephx | bool
+ - item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _rgw_keys.results }}"
+ when:
+ - cephx | bool
+ - item.item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: nfs object gateway related tasks
+ when: nfs_obj_gw | bool
+ block:
+ - name: create rados gateway keyring
+ ceph_key:
+ name: "client.rgw.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ user: client.bootstrap-rgw
+ user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
+ caps:
+ mon: "allow rw"
+ osd: "allow rwx"
+ dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"
+ import_key: false
+ owner: ceph
+ group: ceph
+ mode: "{{ ceph_keyring_permissions }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
\ No newline at end of file
--- /dev/null
+---
+- name: debian based systems - repo handling
+ when: ceph_origin == 'repository'
+ block:
+ - name: stable repos specific tasks
+ when:
+ - nfs_ganesha_stable | bool
+ - ceph_repository == 'community'
+ block:
+ - name: add nfs-ganesha stable repository
+ apt_repository:
+ repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
+ state: present
+ update_cache: no
+ register: add_ganesha_apt_repo
+
+ - name: add libntirpc stable repository
+ apt_repository:
+ repo: "deb {{ libntirpc_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
+ state: present
+ update_cache: no
+ register: add_libntirpc_apt_repo
+ when: libntirpc_stable_deb_repo is defined
+
+ - name: add nfs-ganesha ppa apt key
+ apt_key:
+ keyserver: "{{ nfs_ganesha_apt_keyserver }}"
+ id: "{{ nfs_ganesha_apt_key_id }}"
+ when:
+ - nfs_ganesha_apt_key_id is defined
+ - nfs_ganesha_apt_keyserver is defined
+
+ - name: update apt cache
+ apt:
+ update_cache: yes
+ register: update_ganesha_apt_cache
+ retries: 5
+ delay: 2
+ until: update_ganesha_apt_cache is success
+ when: add_ganesha_apt_repo is changed or add_libntirpc_apt_repo is changed
+
+ - name: debian based systems - dev repos specific tasks
+ when:
+ - nfs_ganesha_dev | bool
+ - ceph_repository == 'dev'
+ block:
+ - name: fetch nfs-ganesha development repository
+ uri:
+ url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
+ return_content: yes
+ register: nfs_ganesha_dev_apt_repo
+
+ - name: add nfs-ganesha development repository
+ copy:
+ content: "{{ nfs_ganesha_dev_apt_repo.content }}"
+ dest: /etc/apt/sources.list.d/nfs-ganesha-dev.list
+ owner: root
+ group: root
+ backup: yes
+
+- name: debian based systems - install required packages
+ block:
+ - name: debian based systems - non-rhcs installation
+ when:
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ - ceph_repository != 'rhcs'
+ block:
+ - name: install nfs rgw/cephfs gateway - debian
+ apt:
+ name: ['nfs-ganesha-rgw', 'radosgw']
+ allow_unauthenticated: yes
+ register: result
+ until: result is succeeded
+ when: nfs_obj_gw | bool
+ - name: install nfs rgw/cephfs gateway - debian
+ apt:
+ name: nfs-ganesha-ceph
+ allow_unauthenticated: yes
+ register: result
+ until: result is succeeded
+ when: nfs_file_gw | bool
+
+ - name: debian based systems - rhcs installation
+ when:
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ - ceph_repository == 'rhcs'
+ block:
+ - name: install red hat storage nfs gateway for debian
+ apt:
+ name: nfs-ganesha
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ - name: install red hat storage nfs file gateway
+ apt:
+ name: nfs-ganesha-ceph
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ when: nfs_file_gw | bool
+ - name: install red hat storage nfs obj gateway
+ apt:
+ name: nfs-ganesha-rgw
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ when: nfs_obj_gw | bool
--- /dev/null
+---
+- name: red hat based systems - repo handling
+ when: ceph_origin == 'repository'
+ block:
+ - name: red hat based systems - stable repo related tasks
+ when:
+ - nfs_ganesha_stable | bool
+ - ceph_repository == 'community'
+ block:
+ - name: add nfs-ganesha stable repository
+ yum_repository:
+ name: nfs_ganesha_stable
+ description: nfs-ganesha stable repo
+ gpgcheck: yes
+ state: present
+ gpgkey: "{{ ceph_stable_key }}"
+ baseurl: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/{{ ceph_release }}/el$releasever/$basearch"
+ file: nfs_ganesha_stable
+
+ - name: add nfs-ganesha stable noarch repository
+ yum_repository:
+ name: nfs_ganesha_stable_noarch
+ description: nfs-ganesha stable noarch repo
+ gpgcheck: yes
+ state: present
+ gpgkey: "{{ ceph_stable_key }}"
+ baseurl: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/{{ ceph_release }}/el$releasever/noarch"
+ file: nfs_ganesha_stable
+
+ - name: red hat based systems - dev repo related tasks
+ block:
+ - name: add nfs-ganesha dev repo
+ get_url:
+ url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
+ dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
+ force: true
+ when:
+ - nfs_ganesha_dev | bool
+ - ceph_repository == 'dev'
+
+- name: red hat based systems - install nfs packages
+ block:
+ - name: install nfs cephfs gateway
+ package:
+ name: ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace']
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ when: nfs_file_gw | bool
+
+ - name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
+ package:
+ name: ['nfs-ganesha-rgw', 'nfs-ganesha-rados-grace', 'nfs-ganesha-rados-urls', 'ceph-radosgw']
+ state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ register: result
+ until: result is succeeded
+ when: nfs_obj_gw | bool
--- /dev/null
+---
+- block:
+ - name: set_fact exec_cmd_nfs - external
+ set_fact:
+ exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring"
+ delegate_node: "{{ inventory_hostname }}"
+ when: groups.get(mon_group_name, []) | length == 0
+
+ - name: set_fact exec_cmd_nfs - internal
+ set_fact:
+ exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
+ delegate_node: "{{ groups[mon_group_name][0] }}"
+ when: groups.get(mon_group_name, []) | length > 0
+
+ - name: check if rados index object exists
+ shell: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
+ changed_when: false
+ failed_when: false
+ register: rados_index_exists
+ check_mode: no
+ when: ceph_nfs_rados_backend | bool
+ delegate_to: "{{ delegate_node }}"
+ run_once: true
+
+ - name: create an empty rados index object
+ command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
+ when:
+ - ceph_nfs_rados_backend | bool
+ - rados_index_exists.rc != 0
+ delegate_to: "{{ delegate_node }}"
+ run_once: true
+
+- name: create /etc/ganesha
+ file:
+ path: /etc/ganesha
+ state: directory
+ owner: root
+ group: root
+ mode: "0755"
+
+- name: generate ganesha configuration file
+ template:
+ src: "ganesha.conf.j2"
+ dest: /etc/ganesha/ganesha.conf
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph nfss
+
+- name: generate ganesha idmap.conf file
+ action: config_template
+ args:
+ src: "idmap.conf.j2"
+ dest: "{{ ceph_nfs_idmap_conf }}"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ config_overrides: "{{ idmap_conf_overrides }}"
+ config_type: ini
+ notify: restart ceph nfss
+
+- name: create exports directory
+ file:
+ path: /etc/ganesha/export.d
+ state: directory
+ owner: "root"
+ group: "root"
+ mode: "0755"
+ when: ceph_nfs_dynamic_exports | bool
+
+- name: create exports dir index file
+ copy:
+ content: ""
+ force: no
+ dest: /etc/ganesha/export.d/INDEX.conf
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ when: ceph_nfs_dynamic_exports | bool
+
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+ when: containerized_deployment | bool
+
+- name: systemd start nfs container
+ systemd:
+ name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ when:
+ - containerized_deployment | bool
+ - ceph_nfs_enable_service | bool
+
+- name: start nfs gateway service
+ systemd:
+ name: nfs-ganesha
+ state: started
+ enabled: yes
+ masked: no
+ when:
+ - not containerized_deployment | bool
+ - ceph_nfs_enable_service | bool
--- /dev/null
+---
+- name: generate systemd unit file
+ template:
+ src: "{{ role_path }}/templates/ceph-nfs.service.j2"
+ dest: /etc/systemd/system/ceph-nfs@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph nfss
\ No newline at end of file
--- /dev/null
+[Unit]
+Description=NFS-Ganesha file server
+Documentation=http://github.com/nfs-ganesha/nfs-ganesha/wiki
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph /var/log/ganesha
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
+ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ -v /var/lib/ceph:/var/lib/ceph:z \
+ -v /etc/ceph:/etc/ceph:z \
+ -v /var/lib/nfs/ganesha:/var/lib/nfs/ganesha:z \
+ -v /etc/ganesha:/etc/ganesha:z \
+ -v /var/run/ceph:/var/run/ceph:z \
+ -v /var/log/ceph:/var/log/ceph:z \
+ -v /var/log/ganesha:/var/log/ganesha:z \
+ {% if ceph_nfs_dynamic_exports | bool %}
+ --privileged \
+ -v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
+ {% endif -%}
+ -v /etc/localtime:/etc/localtime:ro \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=NFS \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ {{ ceph_nfs_docker_extra_env }} \
+ --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+#jinja2: trim_blocks: "true", lstrip_blocks: "true"
+# {{ ansible_managed }}
+
+{% if ceph_nfs_dynamic_exports | bool and not ceph_nfs_rados_backend | bool %}
+%include /etc/ganesha/export.d/INDEX.conf
+{% endif %}
+
+NFS_Core_Param
+{
+{% if ceph_nfs_bind_addr is defined %}
+ Bind_Addr={{ ceph_nfs_bind_addr }};
+{% endif %}
+{{ ganesha_core_param_overrides | default(None) }}
+}
+
+{% if ceph_nfs_disable_caching | bool or nfs_file_gw | bool %}
+EXPORT_DEFAULTS {
+ Attr_Expiration_Time = 0;
+}
+
+CACHEINODE {
+ Dir_Chunk = 0;
+
+ NParts = 1;
+ Cache_Size = 1;
+}
+{% endif %}
+
+{% if ceph_nfs_rados_backend | bool %}
+RADOS_URLS {
+ ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+ userid = "{{ ceph_nfs_ceph_user }}";
+}
+%url rados://{{ cephfs_data_pool.name }}/{{ ceph_nfs_rados_export_index }}
+
+NFSv4 {
+ RecoveryBackend = 'rados_kv';
+ IdmapConf = "{{ ceph_nfs_idmap_conf }}";
+}
+RADOS_KV {
+ ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+ userid = "{{ ceph_nfs_ceph_user }}";
+ pool = "{{ cephfs_data_pool.name }}";
+}
+{% endif %}
+
+{% if nfs_file_gw | bool %}
+EXPORT
+{
+ Export_id={{ ceph_nfs_ceph_export_id }};
+
+ Path = "/";
+
+ Pseudo = {{ ceph_nfs_ceph_pseudo_path }};
+
+ Access_Type = {{ ceph_nfs_ceph_access_type }};
+
+ Protocols = {{ ceph_nfs_ceph_protocols }};
+
+ Transports = TCP;
+
+ SecType = {{ ceph_nfs_ceph_sectype }};
+
+ Squash = {{ ceph_nfs_ceph_squash }};
+
+ Attr_Expiration_Time = 0;
+
+ FSAL {
+ Name = CEPH;
+ User_Id = "{{ ceph_nfs_ceph_user }}";
+ }
+
+ {{ ganesha_ceph_export_overrides | default(None) }}
+}
+{% endif %}
+{% if nfs_obj_gw | bool %}
+EXPORT
+{
+ Export_id={{ ceph_nfs_rgw_export_id }};
+
+ Path = "/";
+
+ Pseudo = {{ ceph_nfs_rgw_pseudo_path }};
+
+ Access_Type = {{ ceph_nfs_rgw_access_type }};
+
+ Protocols = {{ ceph_nfs_rgw_protocols }};
+
+ Transports = TCP;
+
+ SecType = {{ ceph_nfs_rgw_sectype }};
+
+ Squash = {{ ceph_nfs_rgw_squash }};
+
+ FSAL {
+ Name = RGW;
+ User_Id = "{{ ceph_nfs_rgw_user }}";
+ Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}";
+ Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}";
+ }
+
+ {{ ganesha_rgw_export_overrides | default(None) }}
+
+}
+
+RGW {
+ ceph_conf = "/etc/ceph/{{ cluster }}.conf";
+ cluster = "{{ cluster }}";
+ name = "{{ rgw_client_name }}";
+ {{ ganesha_rgw_section_overrides | default(None) }}
+}
+{% endif %}
+
+LOG {
+ Facility {
+ name = FILE;
+ destination = "{{ ceph_nfs_log_file }}";
+ enable = active;
+ }
+
+ {{ ganesha_log_overrides | default(None) }}
+}
+
+{{ ganesha_conf_overrides | default(None) }}
--- /dev/null
+[General]
+#Verbosity = 0
+# The following should be set to the local NFSv4 domain name
+# The default is the host's DNS domain name.
+#Domain = local.domain.edu
+
+# In multi-domain environments, some NFS servers will append the identity
+# management domain to the owner and owner_group in lieu of a true NFSv4
+# domain. This option can facilitate lookups in such environments. If
+# set to a value other than "none", the nsswitch plugin will first pass
+# the name to the password/group lookup function without stripping the
+# domain off. If that mapping fails then the plugin will try again using
+# the old method (comparing the domain in the string to the Domain value,
+# stripping it if it matches, and passing the resulting short name to the
+# lookup function). Valid values are "user", "group", "both", and
+# "none". The default is "none".
+#No-Strip = none
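+# Illustrative example only: pass the full name to the lookup functions for
+# both users and groups before falling back to stripping the domain:
+#No-Strip = both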
+
+# Winbind has a quirk whereby doing a group lookup in UPN format
+# (e.g. staff@americas.example.com) will cause the group to be
+# displayed prefixed with the full domain in uppercase
+# (e.g. AMERICAS.EXAMPLE.COM\staff) instead of in the familiar netbios
+# name format (e.g. AMERICAS\staff). Setting this option to true
+# causes the name to be reformatted before passing it to the group
+# lookup function in order to work around this. This setting is
+# ignored unless No-Strip is set to either "both" or "group".
+# The default is "false".
+#Reformat-Group = false
+
+# The following is a comma-separated list of Kerberos realm
+# names that should be considered to be equivalent to the
+# local realm, such that <user>@REALM.A can be assumed to
+# be the same user as <user>@REALM.B
+# If not specified, the default local realm is the domain name,
+# which defaults to the host's DNS domain name,
+# translated to upper-case.
+# Note that if this value is specified, the local realm name
+# must be included in the list!
+#Local-Realms =
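+# Illustrative example only (realm names are placeholders); note that the
+# local realm itself must be included in the list:
+#Local-Realms = LOCAL.DOMAIN.EDU,OTHER.REALM.EDU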
+
+[Mapping]
+
+#Nobody-User = nobody
+#Nobody-Group = nobody
+
+[Translation]
+
+# Translation Method is a comma-separated, ordered list of
+# translation methods that can be used. Distributed methods
+# include "nsswitch", "umich_ldap", and "static". Each method
+# is a dynamically loadable plugin library.
+# New methods may be defined and inserted in the list.
+# The default is "nsswitch".
+#Method = nsswitch
+
+# Optional. This is a comma-separated, ordered list of
+# translation methods to be used for translating GSS
+# authenticated names to ids.
+# If this option is omitted, the same methods as those
+# specified in "Method" are used.
+#GSS-Methods = <alternate method list for translating GSS names>
+
+#-------------------------------------------------------------------#
+# The following are used only for the "static" Translation Method.
+#-------------------------------------------------------------------#
+[Static]
+
+# A "static" list of GSS-Authenticated names to
+# local user name mappings
+
+#someuser@REALM = localuser
+
+
+#-------------------------------------------------------------------#
+# The following are used only for the "umich_ldap" Translation Method.
+#-------------------------------------------------------------------#
+
+[UMICH_SCHEMA]
+
+# server information (REQUIRED)
+LDAP_server = ldap-server.local.domain.edu
+
+# the default search base (REQUIRED)
+LDAP_base = dc=local,dc=domain,dc=edu
+
+#-----------------------------------------------------------#
+# The remaining options have defaults (as shown)
+# and are therefore not required.
+#-----------------------------------------------------------#
+
+# whether or not to perform canonicalization on the
+# name given as LDAP_server
+#LDAP_canonicalize_name = true
+
+# absolute search base for (people) accounts
+#LDAP_people_base = <LDAP_base>
+
+# absolute search base for groups
+#LDAP_group_base = <LDAP_base>
+
+# Set to true to enable SSL - anything else is not enabled
+#LDAP_use_ssl = false
+
+# You must specify a CA certificate location if you enable SSL
+#LDAP_ca_cert = /etc/ldapca.cert
+
+# Objectclass mapping information
+
+# Mapping for the person (account) object class
+#NFSv4_person_objectclass = NFSv4RemotePerson
+
+# Mapping for the nfsv4name attribute in the person object
+#NFSv4_name_attr = NFSv4Name
+
+# Mapping for the UID number
+#NFSv4_uid_attr = UIDNumber
+
+# Mapping for the GSSAPI Principal name
+#GSS_principal_attr = GSSAuthName
+
+# Mapping for the account name attribute (usually uid)
+# The value for this attribute must match the value of
+# the group member attribute - NFSv4_member_attr
+#NFSv4_acctname_attr = uid
+
+# Mapping for the group object class
+#NFSv4_group_objectclass = NFSv4RemoteGroup
+
+# Mapping for the GID attribute
+#NFSv4_gid_attr = GIDNumber
+
+# Mapping for the Group NFSv4 name
+#NFSv4_group_attr = NFSv4Name
+
+# Mapping for the Group member attribute (usually memberUID)
+# The value of this attribute must match the value of NFSv4_acctname_attr
+#NFSv4_member_attr = memberUID
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Boris Ranto
+ description: Configures Prometheus Node Exporter
+ license: Apache
+ min_ansible_version: 2.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: include setup_container.yml
+ include_tasks: setup_container.yml
--- /dev/null
+---
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: start the node_exporter service
+ systemd:
+ name: node_exporter
+ state: started
+ enabled: yes
+ daemon_reload: yes
+ failed_when: false
--- /dev/null
+---
+- name: ship systemd service
+ template:
+ src: node_exporter.service.j2
+ dest: "/etc/systemd/system/node_exporter.service"
+ owner: root
+ group: root
+ mode: 0644
--- /dev/null
+# This file is managed by ansible, don't make changes here - they will be
+# overwritten.
+[Unit]
+Description=Node Exporter
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage node-exporter
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f node-exporter
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name=node-exporter \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --privileged \
+ --security-opt label=disable \
+ -v /:/rootfs:ro \
+ --net=host \
+ {{ node_exporter_container_image }} \
+ --path.procfs=/rootfs/proc \
+ --path.sysfs=/rootfs/sys \
+ --path.rootfs=/rootfs \
+ --no-collector.timex \
+ --web.listen-address=:{{ node_exporter_port }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStop=-/usr/bin/{{ container_binary }} stop node-exporter
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-osd
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+###########
+# GENERAL #
+###########
+
+# Even though OSD nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory.
+copy_admin_key: false
+
+
+################
+# CEPH OPTIONS #
+################
+
+# Devices to be used as OSDs
+# You can pre-provision disks that are not present yet.
+# Ansible will just skip them. Newly added disks will be
+# automatically configured during the next run.
+#
+
+
+# Declare devices to be used as OSDs
+# All scenarios (except the 3rd) inherit from the following device declaration
+# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs
+
+#devices:
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+# - /dev/sde
+
+devices: []
+
+# Declare devices to be used as block.db devices
+
+#dedicated_devices:
+# - /dev/sdx
+# - /dev/sdy
+
+dedicated_devices: []
+
+# Declare devices to be used as block.wal devices
+
+#bluestore_wal_devices:
+# - /dev/nvme0n1
+# - /dev/nvme0n2
+
+bluestore_wal_devices: []
+
+# The 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
+# Device discovery is based on the Ansible fact 'ansible_facts["devices"]',
+# which reports all the devices on a system. If enabled, all the disks
+# found will be passed to ceph-volume lvm batch. You should not worry about using
+# this option since ceph-volume has a built-in check which looks for empty devices;
+# thus devices with existing partition tables will not be used.
+#
+osd_auto_discovery: false
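+
+# Illustrative example (not a default shipped by this role): to let ceph-volume
+# lvm batch consume every empty disk it discovers, leave 'devices' empty and
+# enable auto discovery in your group_vars:
+#osd_auto_discovery: true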
+
+# Encrypt your OSD device using dmcrypt
+# If set to True, the data will be encrypted no matter which osd_objectstore you use
+dmcrypt: False
+
+# Use ceph-volume to create OSDs from logical volumes.
+# lvm_volumes is a list of dictionaries.
+#
+# Filestore: Each dictionary must contain data and journal keys. Any
+# logical volume or volume group used must be given by name and not by path. data
+# can be a logical volume, device or partition. journal can be either an lv or a partition.
+# You cannot use the same journal for multiple data lvs.
+# data_vg must be the volume group name of the data lv; it only applies when data is an lv.
+# journal_vg is optional and must be the volume group name of the journal lv, if applicable.
+# For example:
+# lvm_volumes:
+# - data: data-lv1
+# data_vg: vg1
+# journal: journal-lv1
+# journal_vg: vg2
+# crush_device_class: foo
+# - data: data-lv2
+# journal: /dev/sda1
+# data_vg: vg1
+# - data: data-lv3
+# journal: /dev/sdb1
+# data_vg: vg2
+# - data: /dev/sda
+# journal: /dev/sdb1
+# - data: /dev/sda1
+# journal: /dev/sdb1
+#
+# Bluestore: Each dictionary must contain at least a data key. When defining wal or
+# db, both the lv name and its volume group must be given (db and wal themselves are optional).
+# This allows for four combinations: just data; data and wal; data, wal and db;
+# or data and db.
+# For example:
+# lvm_volumes:
+# - data: data-lv1
+# data_vg: vg1
+# wal: wal-lv1
+# wal_vg: vg1
+# crush_device_class: foo
+# - data: data-lv2
+# db: db-lv2
+# db_vg: vg2
+# - data: data-lv3
+# wal: wal-lv1
+# wal_vg: vg3
+# db: db-lv3
+# db_vg: vg3
+# - data: data-lv4
+# data_vg: vg4
+# - data: /dev/sda
+# - data: /dev/sdb1
+
+lvm_volumes: []
+crush_device_class: ""
+osds_per_device: 1
+
+###############
+# CRUSH RULES #
+###############
+crush_rule_config: false
+
+crush_rule_hdd:
+ name: HDD
+ root: default
+ type: host
+ class: hdd
+ default: false
+
+crush_rule_ssd:
+ name: SSD
+ root: default
+ type: host
+ class: ssd
+ default: false
+
+crush_rules:
+ - "{{ crush_rule_hdd }}"
+ - "{{ crush_rule_ssd }}"
+
+# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
+# and will move hosts into them which might lead to significant data movement in the cluster!
+#
+# In order for the playbook to create the CRUSH hierarchy, you have to set up your Ansible inventory file like so:
+#
+# [osds]
+# ceph-osd-01 osd_crush_location="{ 'root': 'mon-roottt', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'ceph-osd-01' }"
+#
+# Note that 'host' is mandatory and that you need to submit at least two bucket types (including the host)
+create_crush_tree: false
+
+##########
+# DOCKER #
+##########
+
+ceph_config_keys: [] # DON'T TOUCH ME
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
+ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_osd_docker_cpu_limit: 4
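+
+# Illustrative example (not a default): the limits above can be overridden in your
+# group_vars, e.g. to cap each containerized OSD at 8 GB of RAM and 2 CPUs:
+#ceph_osd_docker_memory_limit: "8192m"
+#ceph_osd_docker_cpu_limit: 2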
+
+# The next two variables are undefined and thus unused by default.
+# If `lscpu | grep NUMA` returned the following:
+# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16
+# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17
+# then, the following would run the OSD on the first NUMA node only.
+#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+#ceph_osd_docker_cpuset_mems: "0"
+
+# PREPARE DEVICE
+#
+# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
+#
+ceph_osd_docker_devices: "{{ devices }}"
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
+
+# ACTIVATE DEVICE
+#
+ceph_osd_docker_extra_env:
+ceph_osd_numactl_opts: ""
+
+###########
+# SYSTEMD #
+###########
+
+# ceph_osd_systemd_overrides will override the systemd settings
+# for the ceph-osd services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_osd_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
+
+###########
+#  CHECK  #
+###########
+
+nb_retry_wait_osd_up: 60
+delay_wait_osd_up: 10
--- /dev/null
+[Unit]
+Description=ceph target allowing to start/stop all ceph-osd@.service instances at once
+PartOf=ceph.target
+After=ceph-mon.target
+Before=ceph.target
+Wants=ceph.target ceph-mon.target
+
+[Install]
+WantedBy=multi-user.target ceph.target
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph Object Storage Daemon
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create bootstrap-osd and osd directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_directories_mode }}"
+ when: cephx | bool
+ with_items:
+ - /var/lib/ceph/bootstrap-osd/
+ - /var/lib/ceph/osd/
+
+- name: get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _osd_keys
+ with_items:
+ - { name: "client.bootstrap-osd", path: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
+ - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when:
+ - cephx | bool
+ - item.copy_key | bool
+
+- name: copy ceph key(s) if needed
+ copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _osd_keys.results }}"
+ when:
+ - cephx | bool
+ - item is not skipped
+ - item.item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
--- /dev/null
+---
+- name: set_fact container_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=0'
+ set_fact:
+ container_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
+ when:
+ - osd_objectstore == 'filestore'
+ - not dmcrypt | bool
+
+- name: set_fact container_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=1'
+ set_fact:
+ container_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
+ when:
+ - osd_objectstore == 'filestore'
+ - dmcrypt | bool
+
+- name: set_fact container_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
+ set_fact:
+ container_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
+ when:
+ - osd_objectstore == 'bluestore'
+ - not dmcrypt | bool
+
+- name: set_fact container_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=1'
+ set_fact:
+ container_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
+ when:
+ - osd_objectstore == 'bluestore'
+ - dmcrypt | bool
--- /dev/null
+---
+- name: configure crush hierarchy
+ ceph_crush:
+ cluster: "{{ cluster }}"
+ location: "{{ osd_crush_location }}"
+ containerized: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }}"
+ register: config_crush_hierarchy
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ when:
+ - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool
+ - osd_crush_location is defined
+
+- name: create configured crush rules
+ ceph_crush_rule:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ rule_type: replicated
+ bucket_root: "{{ item.root }}"
+ bucket_type: "{{ item.type }}"
+ device_class: "{{ item.class | default(omit) }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique }}"
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ run_once: true
+
+- name: get id for new default crush rule
+ ceph_crush_rule:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: info_ceph_default_crush_rule
+ with_items: "{{ hostvars[groups[mon_group_name][0]]['crush_rules'] | default(crush_rules) | unique }}"
+ delegate_to: '{{ groups[mon_group_name][0] }}'
+ run_once: true
+ when: item.default | default(False) | bool
+
+# If multiple rules are set as default (which should not happen), the last one is taken as the actual default:
+# each with_items iteration overrides the fact set by the previous one.
+# NOTE(leseb): we should actually fail if multiple rules are set as default
+- name: set_fact info_ceph_default_crush_rule_yaml, ceph_osd_pool_default_crush_rule_name
+ set_fact:
+ info_ceph_default_crush_rule_yaml: "{{ item.stdout | default('{}', True) | from_json() }}"
+ ceph_osd_pool_default_crush_rule_name: "{{ (item.stdout | default('{}', True) | from_json).get('rule_name') }}"
+ with_items: "{{ info_ceph_default_crush_rule.results }}"
+ run_once: true
+ when: not item.get('skipped', false)
+
+- name: insert new default crush rule into daemon to prevent restart
+ command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[item]['monitor_name'] }}.asok config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
+ changed_when: false
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups[mon_group_name] }}"
+ run_once: true
+ when:
+ - info_ceph_default_crush_rule_yaml | default('') | length > 0
+
+- name: "add new default crush rule to {{ cluster }}.conf"
+ ini_file:
+ dest: "/etc/ceph/{{ cluster }}.conf"
+ section: "global"
+ option: "osd pool default crush rule"
+ value: "{{ info_ceph_default_crush_rule_yaml.rule_id }}"
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups[mon_group_name] }}"
+ run_once: true
+ when:
+ - info_ceph_default_crush_rule_yaml | default('') | length > 0
--- /dev/null
+---
+- name: set_fact add_osd
+ set_fact:
+ add_osd: "{{ groups[osd_group_name] | length != ansible_play_hosts_all | length }}"
+
+- name: set_fact container_exec_cmd
+ set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ run_once: true
+ when: containerized_deployment | bool
+
+- name: include_tasks system_tuning.yml
+ include_tasks: system_tuning.yml
+
+- name: install dependencies
+ package:
+ name: parted
+ state: present
+ register: result
+ until: result is succeeded
+ when:
+ - not containerized_deployment | bool
+ - ansible_facts['os_family'] != 'ClearLinux'
+
+- name: install numactl when needed
+ package:
+ name: numactl
+ register: result
+ until: result is succeeded
+ when:
+ - containerized_deployment | bool
+ - ceph_osd_numactl_opts | length > 0
+ tags: with_pkg
+
+- name: include_tasks common.yml
+ include_tasks: common.yml
+
+- name: set noup flag
+ ceph_osd_flag:
+ name: noup
+ cluster: "{{ cluster }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: True
+ when:
+ - not rolling_update | default(False) | bool
+ - not switch_to_containers | default(False) | bool
+
+- name: include container_options_facts.yml
+ include_tasks: container_options_facts.yml
+ when: containerized_deployment | bool
+
+- name: include_tasks scenarios/lvm.yml
+ include_tasks: scenarios/lvm.yml
+ when:
+ - lvm_volumes|length > 0
+ - not rolling_update|default(False) | bool
+
+- name: include_tasks scenarios/lvm-batch.yml
+ include_tasks: scenarios/lvm-batch.yml
+ when:
+ - devices|length > 0
+ - not rolling_update|default(False) | bool
+
+- name: include_tasks start_osds.yml
+ include_tasks: start_osds.yml
+
+- name: unset noup flag
+ ceph_osd_flag:
+ name: noup
+ cluster: "{{ cluster }}"
+ state: absent
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - not rolling_update | default(False) | bool
+ - not switch_to_containers | default(False) | bool
+ - inventory_hostname == ansible_play_hosts_all | last
+
+- name: wait for all osd to be up
+ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json"
+ register: wait_for_all_osds_up
+ retries: "{{ nb_retry_wait_osd_up }}"
+ delay: "{{ delay_wait_osd_up }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ until:
+ - (wait_for_all_osds_up.stdout | from_json)["num_osds"] | int > 0
+ - (wait_for_all_osds_up.stdout | from_json)["num_osds"] == (wait_for_all_osds_up.stdout | from_json)["num_up_osds"]
+ when:
+ - not ansible_check_mode
+ - inventory_hostname == ansible_play_hosts_all | last
+ tags: wait_all_osds_up
+
+- name: include crush_rules.yml
+ include_tasks: crush_rules.yml
+ when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
+ tags: wait_all_osds_up
+
+# Create the pools listed in openstack_pools
+- name: include openstack_config.yml
+ include_tasks: openstack_config.yml
+ when:
+ - not add_osd | bool
+ - not rolling_update | default(False) | bool
+ - openstack_config | bool
+ - inventory_hostname == groups[osd_group_name] | last
+ tags: wait_all_osds_up
+
+- name: set osd_memory_target
+ command: "{{ ceph_cmd }} --cluster {{ cluster }} config set osd/host:{{ inventory_hostname }} osd_memory_target {{ _osd_memory_target | default(osd_memory_target) }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
--- /dev/null
+---
+- name: pool related tasks
+ block:
+ - name: create openstack pool(s)
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_num: "{{ item.pg_num | default(omit) }}"
+ pgp_num: "{{ item.pgp_num | default(omit) }}"
+ size: "{{ item.size | default(omit) }}"
+ min_size: "{{ item.min_size | default(omit) }}"
+ pool_type: "{{ item.type | default('replicated') }}"
+ rule_name: "{{ item.rule_name | default(omit) }}"
+ erasure_profile: "{{ item.erasure_profile | default(omit) }}"
+ pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
+ target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
+ application: "{{ item.application | default(omit) }}"
+ with_items: "{{ openstack_pools }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: create openstack cephx key(s)
+ block:
+ - name: generate keys
+ ceph_key:
+ name: "{{ item.name }}"
+ caps: "{{ item.caps }}"
+ secret: "{{ item.key | default('') }}"
+ cluster: "{{ cluster }}"
+ mode: "{{ item.mode | default(ceph_keyring_permissions) }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ openstack_keys }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _osp_keys
+ with_items: "{{ openstack_keys }}"
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "/etc/ceph/{{ cluster }}.{{ item.0.item.name }}.keyring"
+ content: "{{ item.0.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ item.0.item.mode | default(ceph_keyring_permissions) }}"
+ with_nested:
+ - "{{ _osp_keys.results }}"
+ - "{{ groups[mon_group_name] }}"
+ delegate_to: "{{ item.1 }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ when:
+ - cephx | bool
+ - openstack_config | bool
--- /dev/null
+---
+
+- name: "use ceph-volume lvm batch to create {{ osd_objectstore }} osds"
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ objectstore: "{{ osd_objectstore }}"
+ batch_devices: "{{ _devices }}"
+ dmcrypt: "{{ dmcrypt|default(omit) }}"
+ crush_device_class: "{{ crush_device_class|default(omit) }}"
+ osds_per_device: "{{ osds_per_device }}"
+ journal_size: "{{ journal_size }}"
+ block_db_size: "{{ block_db_size }}"
+ block_db_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}"
+ wal_devices: "{{ bluestore_wal_devices | unique if bluestore_wal_devices | length > 0 else omit }}"
+ journal_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}"
+ action: "batch"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ PYTHONIOENCODING: utf-8
+ when: _devices | default([]) | length > 0
+ tags: prepare_osd
--- /dev/null
+---
+- name: "use ceph-volume to create {{ osd_objectstore }} osds"
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ objectstore: "{{ osd_objectstore }}"
+ data: "{{ item.data }}"
+ data_vg: "{{ item.data_vg|default(omit) }}"
+ journal: "{{ item.journal|default(omit) }}"
+ journal_vg: "{{ item.journal_vg|default(omit) }}"
+ db: "{{ item.db|default(omit) }}"
+ db_vg: "{{ item.db_vg|default(omit) }}"
+ wal: "{{ item.wal|default(omit) }}"
+ wal_vg: "{{ item.wal_vg|default(omit) }}"
+ crush_device_class: "{{ item.crush_device_class | default(crush_device_class) | default(omit) }}"
+ dmcrypt: "{{ dmcrypt|default(omit) }}"
+ action: "{{ 'prepare' if containerized_deployment | bool else 'create' }}"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ PYTHONIOENCODING: utf-8
+ with_items: "{{ lvm_volumes }}"
+ tags: prepare_osd
--- /dev/null
+---
+# This is for legacy ceph-disk OSDs: the ceph-disk command is gone, so we have to list /var/lib/ceph instead
+- name: get osd ids
+ shell: ls /var/lib/ceph/osd/ | sed 's/.*-//' # noqa 306
+ args:
+ executable: /bin/bash
+ changed_when: false
+ failed_when: false
+ register: osd_ids_non_container
+
+- name: collect osd ids
+ ceph_volume:
+ cluster: "{{ cluster }}"
+ action: list
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: ceph_osd_ids
+
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+ when: containerized_deployment | bool
+
+- name: ensure systemd service override directory exists
+ file:
+ state: directory
+ path: "/etc/systemd/system/ceph-osd@.service.d/"
+ when:
+ - ceph_osd_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: add ceph-osd systemd service overrides
+ config_template:
+ src: "ceph-osd.service.d-overrides.j2"
+ dest: "/etc/systemd/system/ceph-osd@.service.d/ceph-osd-systemd-overrides.conf"
+ config_overrides: "{{ ceph_osd_systemd_overrides | default({}) }}"
+ config_type: "ini"
+ when:
+ - ceph_osd_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+- name: ensure "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" is present
+ file:
+ state: directory
+ path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
+ mode: "{{ ceph_directories_mode }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}"
+
+- name: systemd start osd
+ systemd:
+ name: ceph-osd@{{ item }}
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}"
--- /dev/null
+---
+- name: debian based systems tasks
+ when:
+ - osd_objectstore == 'filestore'
+ - ansible_facts['os_family'] == "Debian"
+ block:
+ - name: disable osd directory parsing by updatedb
+ command: updatedb -e /var/lib/ceph
+ changed_when: false
+ failed_when: false
+ - name: disable osd directory path in updatedb.conf
+ replace:
+ dest: /etc/updatedb.conf
+ regexp: '^(PRUNEPATHS(?!.*/var/lib/ceph).*)"$'
+ replace: '\1 /var/lib/ceph"'
+ failed_when: false
+
+- name: create tmpfiles.d directory
+ file:
+ path: "/etc/tmpfiles.d"
+ state: "directory"
+ owner: "root"
+ group: "root"
+ mode: "0755"
+ register: "tmpfiles_d"
+ when: disable_transparent_hugepage | bool
+
+- name: disable transparent hugepage
+ template:
+ src: "tmpfiles_hugepage.j2"
+ dest: "/etc/tmpfiles.d/ceph_transparent_hugepage.conf"
+ group: "root"
+ owner: "root"
+ mode: "0644"
+ force: "yes"
+ validate: "systemd-tmpfiles --create %s"
+ when: disable_transparent_hugepage | bool
+
+- name: get default vm.min_free_kbytes
+ slurp:
+ src: /proc/sys/vm/min_free_kbytes
+ register: default_vm_min_free_kbytes
+
+- name: set_fact vm_min_free_kbytes
+ set_fact:
+ vm_min_free_kbytes: "{{ 4194303 if ansible_facts['memtotal_mb'] >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}"
+
+- name: apply operating system tuning
+ sysctl:
+ name: "{{ item.name }}"
+ value: "{{ item.value }}"
+ state: present
+ sysctl_file: /etc/sysctl.d/ceph-tuning.conf
+ sysctl_set: yes
+ ignoreerrors: yes
+ with_items:
+ - { name: "fs.aio-max-nr", value: "1048576", enable: "{{ osd_objectstore == 'bluestore' }}" }
+ - "{{ os_tuning_params }}"
+ when: item.enable | default(true) | bool
--- /dev/null
+---
+- name: generate systemd unit file
+ template:
+ src: "{{ role_path }}/templates/ceph-osd.service.j2"
+ dest: /etc/systemd/system/ceph-osd@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph osds
+
+- name: generate systemd ceph-osd target file
+ copy:
+ src: ceph-osd.target
+ dest: /etc/systemd/system/ceph-osd.target
+ when: containerized_deployment | bool
+
+- name: enable ceph-osd.target
+ service:
+ name: ceph-osd.target
+ enabled: yes
+ daemon_reload: yes
+ when: containerized_deployment | bool
\ No newline at end of file
--- /dev/null
+# {{ ansible_managed }}
--- /dev/null
+# {{ ansible_managed }}
+[Unit]
+Description=Ceph OSD
+PartOf=ceph-osd.target
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_osd_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_osd_docker_cpu_limit|int %}
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-osd-%i
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
+ExecStart={% if ceph_osd_numactl_opts != "" %}
+numactl \
+{{ ceph_osd_numactl_opts }} \
+{% endif %}
+/usr/bin/{{ container_binary }} run \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --rm \
+ --net=host \
+ --privileged=true \
+ --pid=host \
+ --ipc=host \
+ {% if osd_objectstore == 'filestore' -%}
+ --memory={{ ceph_osd_docker_memory_limit }} \
+ {% endif -%}
+ --cpus={{ cpu_limit }} \
+ {% if ceph_osd_docker_cpuset_cpus is defined -%}
+ --cpuset-cpus='{{ ceph_osd_docker_cpuset_cpus }}' \
+ {% endif -%}
+ {% if ceph_osd_docker_cpuset_mems is defined -%}
+ --cpuset-mems='{{ ceph_osd_docker_cpuset_mems }}' \
+ {% endif -%}
+ -v /dev:/dev \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /var/lib/ceph:/var/lib/ceph:z \
+ -v /etc/ceph:/etc/ceph:z \
+ -v /var/run/ceph:/var/run/ceph:z \
+ -v /var/run/udev/:/var/run/udev/ \
+ -v /var/log/ceph:/var/log/ceph:z \
+ {% if ansible_facts['distribution'] == 'Ubuntu' -%}
+ --security-opt apparmor:unconfined \
+ {% endif -%}
+ {{ container_env_args }} \
+ -e CLUSTER={{ cluster }} \
+ -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+ -v /run/lvm/:/run/lvm/ \
+ -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ -e OSD_ID=%i \
+ --name=ceph-osd-%i \
+ {{ ceph_osd_docker_extra_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=ceph.target
--- /dev/null
+{{ '# ' + ansible_managed }}
+
+{{ 'w /sys/kernel/mm/transparent_hugepage/enabled - - - - never' }}
--- /dev/null
+---
+container_bin_path: /opt/ceph-container/bin
--- /dev/null
+groups:
+- name: dashboard
+ rules:
+ - alert: Ceph Health Warning
+ expr: ceph_health_status == 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Ceph Health Warning"
+ description: "Overall Ceph Health"
+ - alert: Ceph Health Error
+ expr: ceph_health_status > 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Ceph Health Error"
+ description: "The Ceph cluster health is in an error state"
+ - alert: Disk(s) Near Full
+ expr: (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes) * 100 > 85
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Disk(s) Near Full"
+ description: "This shows how many disks are at or above 85% full. Performance may degrade beyond this threshold on filestore (XFS) backed OSD's."
+ - alert: OSD(s) Down
+ expr: ceph_osd_up < 0.5
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD(s) Down"
+ description: "This indicates that one or more OSDs is currently marked down in the cluster."
+ - alert: OSD Host(s) Down
+ expr: count by(instance) (ceph_disk_occupation * on(ceph_daemon) group_right(instance) ceph_osd_up == 0) - count by(instance) (ceph_disk_occupation) == 0
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD Host(s) Down"
+ description: "This indicates that one or more OSD hosts is currently down in the cluster."
+ - alert: PG(s) Stuck
+ expr: max(ceph_osd_numpg) > scalar(ceph_pg_active)
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "PG(s) Stuck"
+ description: "This indicates there are pg's in a stuck state, manual intervention needed to resolve."
+ - alert: OSD Host Loss Check
+ expr: max(sum(ceph_osd_stat_bytes - ceph_osd_stat_bytes_used)) * 0.9 < scalar(max(sum by (instance) (ceph_osd_stat_bytes + on (ceph_daemon) group_left (instance) (ceph_disk_occupation*0))))
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD Host Loss Check"
+ description: "This indicates that the cluster @ 90% full is not enough to support the loss of the largest OSD host."
+ - alert: Slow OSD Responses
+ expr: ((irate(node_disk_read_time_seconds_total[5m]) / clamp_min(irate(node_disk_reads_completed_total[5m]), 1) + irate(node_disk_write_time_seconds_total[5m]) / clamp_min(irate(node_disk_writes_completed_total[5m]), 1)) and on (instance, device) ceph_disk_occupation) > 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Slow OSD Responses"
+ description: "This indicates that some OSD Latencies are above 1s."
+ - alert: Network Errors
+ expr: sum by (instance, device) (irate(node_network_receive_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_receive_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m])) > 10
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Network Errors"
+ description: "This indicates that more than 10 dropped/error packets are seen in a 5m interval"
+ - alert: Pool Capacity Low
+ expr: (ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail) * 100 + on (pool_id) group_left (name) (ceph_pool_metadata*0)) > 85
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Pool Capacity Low"
+ description: "This indicates a low capacity in a pool."
+ - alert: MON(s) Down
+ expr: ceph_mon_quorum_status != 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "MON(s) down"
+ description: "This indicates that one or more MON(s) is down."
+ - alert: Cluster Capacity Low
+ expr: sum(ceph_osd_stat_bytes_used) / sum(ceph_osd_stat_bytes) > 0.85
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Cluster Capacity Low"
+ description: "This indicates raw used space crosses the 85% capacity threshold of the ceph cluster."
+ - alert: OSD(s) with High PG Count
+ expr: ceph_osd_numpg > 275
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD(s) with High PG Count"
+ description: "This indicates there are some OSDs with high PG count (275+)."
+ - alert: Slow OSD Ops
+ expr: ceph_healthcheck_slow_ops > 0
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Slow OSD Ops"
+ description: "OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
--- /dev/null
+---
+- name: service handler
+ # We use the systemd module here so we can use the daemon_reload feature,
+ # since we're shipping the .service file ourselves
+ systemd:
+ name: "{{ item }}"
+ daemon_reload: true
+ enabled: true
+ state: restarted
+ with_items:
+ - 'alertmanager'
+ - 'prometheus'
+ when: not docker2podman | default(False) | bool
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Boris Ranto
+ description: Configures Prometheus for Ceph Dashboard
+ license: Apache
+ min_ansible_version: 2.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create prometheus directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ prometheus_user_id }}"
+ group: "{{ prometheus_user_id }}"
+ with_items:
+ - "{{ prometheus_conf_dir }}"
+ - "{{ prometheus_data_dir }}"
+
+- name: write prometheus config file
+ config_template:
+ src: prometheus.yml.j2
+ dest: "{{ prometheus_conf_dir }}/prometheus.yml"
+ owner: "{{ prometheus_user_id }}"
+ group: "{{ prometheus_user_id }}"
+ mode: 0640
+ config_type: yaml
+ config_overrides: "{{ prometheus_conf_overrides }}"
+ notify: service handler
+
+- name: make sure the alerting rules directory exists
+ file:
+ path: "/etc/prometheus/alerting/"
+ state: directory
+ owner: "{{ prometheus_user_id }}"
+ group: "{{ prometheus_user_id }}"
+
+- name: copy alerting rules
+ copy:
+ src: "ceph_dashboard.yml"
+ dest: "/etc/prometheus/alerting/ceph_dashboard.yml"
+ owner: "{{ prometheus_user_id }}"
+ group: "{{ prometheus_user_id }}"
+ mode: 0644
+
+- name: create alertmanager directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ prometheus_user_id }}"
+ group: "{{ prometheus_user_id }}"
+ with_items:
+ - "{{ alertmanager_conf_dir }}"
+ - "{{ alertmanager_data_dir }}"
+
+- name: write alertmanager config file
+ config_template:
+ src: alertmanager.yml.j2
+ dest: "{{ alertmanager_conf_dir }}/alertmanager.yml"
+ owner: "{{ prometheus_user_id }}"
+ group: "{{ prometheus_user_id }}"
+ mode: 0640
+ config_type: yaml
+ config_overrides: "{{ alertmanager_conf_overrides }}"
+ notify: service handler
+
+- name: include setup_container.yml
+ include_tasks: setup_container.yml
--- /dev/null
+---
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: start prometheus services
+ systemd:
+ name: "{{ item }}"
+ daemon_reload: true
+ enabled: true
+ state: started
+ with_items:
+ - prometheus
+ - alertmanager
--- /dev/null
+---
+- name: ship systemd services
+ template:
+ src: "{{ item }}.j2"
+ dest: "/etc/systemd/system/{{ item }}"
+ owner: root
+ group: root
+ mode: 0644
+ with_items:
+ - 'alertmanager.service'
+ - 'prometheus.service'
+ notify: service handler
--- /dev/null
+# This file is managed by ansible, don't make changes here - they will be
+# overwritten.
+[Unit]
+Description=alertmanager
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+WorkingDirectory={{ alertmanager_data_dir }}
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage alertmanager
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f alertmanager
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name=alertmanager \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ -v "{{ alertmanager_conf_dir }}:/etc/alertmanager:Z" \
+ -v "{{ alertmanager_data_dir }}:/alertmanager:Z" \
+ --net=host \
+ --cpu-period={{ alertmanager_container_cpu_period }} \
+ --cpu-quota={{ alertmanager_container_cpu_period * alertmanager_container_cpu_cores }} \
+ --memory={{ alertmanager_container_memory }}GB \
+ --memory-swap={{ alertmanager_container_memory * 2 }}GB \
+ {{ alertmanager_container_image }} \
+ --config.file=/etc/alertmanager/alertmanager.yml \
+ --cluster.listen-address={{ grafana_server_addr }}:{{ alertmanager_cluster_port }} \
+{% for peer in grafana_server_addrs|difference(grafana_server_addr) %}
+ --cluster.peer={{ peer }}:{{ alertmanager_cluster_port }} \
+{% endfor %}
+ --storage.path=/alertmanager \
+ --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ alertmanager_port }}/ \
+ --web.listen-address={{ grafana_server_addr }}:{{ alertmanager_port }}
+{% if container_binary == 'podman' %}
+ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStop=/usr/bin/{{ container_binary }} stop alertmanager
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+global:
+ resolve_timeout: 5m
+
+route:
+ group_by: ['alertname']
+ group_wait: 10s
+ group_interval: 10s
+ repeat_interval: 1h
+ receiver: 'ceph-dashboard'
+receivers:
+- name: 'ceph-dashboard'
+ webhook_configs:
+{% for host in groups['mgrs'] | default(groups['mons']) %}
+ - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_facts']['fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
+{% if dashboard_protocol == 'https' and alertmanager_dashboard_api_no_ssl_verify | bool %}
+ http_config:
+ tls_config:
+ insecure_skip_verify: true
+{% endif %}
+{% endfor %}
--- /dev/null
+# This file is managed by ansible, don't make changes here - they will be
+# overwritten.
+[Unit]
+Description=prometheus
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage prometheus
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f prometheus
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name=prometheus \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ -v "{{ prometheus_conf_dir }}:/etc/prometheus:Z" \
+ -v "{{ prometheus_data_dir }}:/prometheus:Z" \
+ --net=host \
+ --user={{ prometheus_user_id }} \
+ --cpu-period={{ prometheus_container_cpu_period }} \
+ --cpu-quota={{ prometheus_container_cpu_period * prometheus_container_cpu_cores }} \
+ --memory={{ prometheus_container_memory }}GB \
+ --memory-swap={{ prometheus_container_memory * 2 }}GB \
+ {{ prometheus_container_image }} \
+ --config.file=/etc/prometheus/prometheus.yml \
+ --storage.tsdb.path=/prometheus \
+{% if prometheus_storage_tsdb_retention_time is defined %}
+ --storage.tsdb.retention.time={{ prometheus_storage_tsdb_retention_time }} \
+{% endif %}
+ --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ prometheus_port }}/ \
+ --web.listen-address={{ grafana_server_addr }}:{{ prometheus_port }}
+{% if container_binary == 'podman' %}
+ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStop=/usr/bin/{{ container_binary }} stop prometheus
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+rule_files:
+ - '/etc/prometheus/alerting/*'
+
+scrape_configs:
+ - job_name: 'prometheus'
+ static_configs:
+ - targets: ['{{ grafana_server_addr }}:{{ prometheus_port }}']
+ - job_name: 'ceph'
+ honor_labels: true
+ static_configs:
+{% for host in groups[mgr_group_name] | default(groups[mon_group_name]) %}
+ - targets: ['{{ host }}:9283']
+ labels:
+ instance: 'ceph_cluster'
+{% endfor %}
+ - job_name: 'node'
+ static_configs:
+{% for host in (groups['all'] | difference(groups[monitoring_group_name] | union(groups.get(client_group_name, []))) | union(groups.get(osd_group_name, []))) %}
+ - targets: ['{{ host }}:{{ node_exporter_port }}']
+ labels:
+ instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
+{% endfor %}
+ - job_name: 'grafana'
+ static_configs:
+{% for host in groups[monitoring_group_name] %}
+ - targets: ['{{ host }}:{{ node_exporter_port }}']
+ labels:
+ instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
+{% endfor %}
+{% if iscsi_gw_group_name in groups %}
+ - job_name: 'iscsi-gws'
+ static_configs:
+{% for host in groups[iscsi_gw_group_name] %}
+ - targets: ['{{ host }}:9287']
+ labels:
+ instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
+{% endfor %}
+{% endif %}
+alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets: ['{{ grafana_server_addr }}:{{ alertmanager_port }}']
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-rbd-mirror
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+#########
+# SETUP #
+#########
+
+# Even though rbd-mirror nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on rbd-mirror nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory. Only
+# valid for Luminous and later releases.
+copy_admin_key: false
+
+
+#################
+# CONFIGURATION #
+#################
+
+ceph_rbd_mirror_local_user: client.rbd-mirror-peer
+ceph_rbd_mirror_configure: false
+ceph_rbd_mirror_mode: pool
+ceph_rbd_mirror_remote_cluster: remote
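+
+# Illustrative example (group_vars sketch, not a default): to have this role actually
+# configure pool-mode mirroring against a remote cluster, something like the following
+# could be set; 'ceph_rbd_mirror_remote_user' is checked by this role's tasks before
+# the rbd-mirror service instance is started.
+#ceph_rbd_mirror_configure: true
+#ceph_rbd_mirror_mode: pool
+#ceph_rbd_mirror_remote_cluster: remote
+#ceph_rbd_mirror_remote_user: client.rbd-mirror-peer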
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
+ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
+ceph_rbd_mirror_docker_cpu_limit: 1
+
+ceph_rbd_mirror_docker_extra_env:
+ceph_config_keys: [] # DON'T TOUCH ME
+
+
+###########
+# SYSTEMD #
+###########
+# ceph_rbd_mirror_systemd_overrides will override the systemd settings
+# for the ceph-rbd-mirror services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_rbd_mirror_systemd_overrides:
+# Service:
+# PrivateDevices: False
--- /dev/null
+[Unit]
+Description=ceph target allowing to start/stop all ceph-rbd-mirror@.service instances at once
+PartOf=ceph.target
+Before=ceph.target
+
+[Install]
+WantedBy=multi-user.target ceph.target
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph Mirror Agent
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: cephx tasks
+ when:
+ - cephx | bool
+ block:
+ - name: get client.bootstrap-rbd-mirror from ceph monitor
+ ceph_key:
+ name: client.bootstrap-rbd-mirror
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _bootstrap_rbd_mirror_key
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: ensure /var/lib/ceph/bootstrap-rbd-mirror exists
+ file:
+ path: /var/lib/ceph/bootstrap-rbd-mirror
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+
+ - name: copy ceph key(s)
+ copy:
+ dest: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring"
+ content: "{{ _bootstrap_rbd_mirror_key.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: create rbd-mirror keyrings
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ user: client.admin
+ user_key: "/etc/ceph/{{ cluster }}.client.admin.keyring"
+ caps:
+ mon: "profile rbd-mirror"
+ osd: "profile rbd"
+ dest: "{{ item.dest }}"
+ secret: "{{ item.secret | default(omit) }}"
+ import_key: true
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ loop:
+ - { name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}",
+ dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring" }
+ - { name: "{{ ceph_rbd_mirror_local_user }}",
+ dest: "/etc/ceph/{{ cluster }}.{{ ceph_rbd_mirror_local_user }}.keyring",
+ secret: "{{ ceph_rbd_mirror_local_user_secret | default('') }}" }
+
+ - name: get "client.rbd-mirror.{{ ansible_facts['hostname'] }}" from ceph monitor
+ ceph_key:
+ name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _rbd_mirror_key
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: copy ceph key
+ copy:
+ dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring"
+ content: "{{ _rbd_mirror_key.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+    no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: start and enable the rbd-mirror service instance
+ service:
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
+ state: started
+ enabled: yes
+ masked: no
+ changed_when: false
+ when:
+ - not containerized_deployment | bool
+ - ceph_rbd_mirror_remote_user is defined
+
+- name: set_fact ceph_rbd_mirror_pools
+ set_fact:
+ ceph_rbd_mirror_pools:
+ - name: "{{ ceph_rbd_mirror_pool }}"
+ when: ceph_rbd_mirror_pools is undefined
+
+- name: create pool if it doesn't exist
+ ceph_pool:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ pg_num: "{{ item.pg_num | default(omit) }}"
+ pgp_num: "{{ item.pgp_num | default(omit) }}"
+ size: "{{ item.size | default(omit) }}"
+ min_size: "{{ item.min_size | default(omit) }}"
+ pool_type: "{{ item.type | default('replicated') }}"
+ rule_name: "{{ item.rule_name | default(omit) }}"
+ erasure_profile: "{{ item.erasure_profile | default(omit) }}"
+ pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
+ target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
+ application: "{{ item.application | default('rbd') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ loop: "{{ ceph_rbd_mirror_pools }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: enable mirroring on the pool
+ command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool enable {{ item.name }} {{ ceph_rbd_mirror_mode }}"
+ register: result
+ changed_when: false
+ retries: 60
+ delay: 1
+ until: result is succeeded
+ loop: "{{ ceph_rbd_mirror_pools }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+- name: add mirroring peer
+ when: ceph_rbd_mirror_remote_user is defined
+ block:
+ - name: list mirroring peer
+ command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool info {{ item.name }}"
+ changed_when: false
+ register: mirror_peer
+ loop: "{{ ceph_rbd_mirror_pools }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: create a temporary file
+ tempfile:
+ path: /etc/ceph
+ state: file
+ suffix: _ceph-ansible
+ register: tmp_file
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: write secret to temporary file
+ copy:
+ dest: "{{ tmp_file.path }}"
+ content: "{{ ceph_rbd_mirror_remote_key }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: add a mirroring peer
+ command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool peer add {{ item.item.name }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ceph_rbd_mirror_remote_mon_hosts }} --remote-key-file {{ tmp_file.path }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ loop: "{{ mirror_peer.results }}"
+ when: ceph_rbd_mirror_remote_user not in item.stdout
+
+ - name: rm temporary file
+ file:
+ path: "{{ tmp_file.path }}"
+ state: absent
+ delegate_to: "{{ groups[mon_group_name][0] }}"
--- /dev/null
+---
+- name: non-containerized related tasks
+ when:
+ - not containerized_deployment | bool
+ - ceph_rbd_mirror_remote_user is defined
+ block:
+ - name: install dependencies
+ package:
+ name: rbd-mirror
+ state: present
+ register: result
+ until: result is succeeded
+ tags: package-install
+
+ - name: ensure systemd service override directory exists
+ file:
+ state: directory
+ path: "/etc/systemd/system/ceph-rbd-mirror@.service.d/"
+ when:
+ - ceph_rbd_mirror_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+ - name: add ceph-rbd-mirror systemd service overrides
+ config_template:
+ src: "ceph-rbd-mirror.service.d-overrides.j2"
+ dest: "/etc/systemd/system/ceph-rbd-mirror@.service.d/ceph-rbd-mirror-systemd-overrides.conf"
+ config_overrides: "{{ ceph_rbd_mirror_systemd_overrides | default({}) }}"
+ config_type: "ini"
+ when:
+ - ceph_rbd_mirror_systemd_overrides is defined
+ - ansible_facts['service_mgr'] == 'systemd'
+
+ - name: enable ceph-rbd-mirror.target
+ systemd:
+ name: "ceph-rbd-mirror.target"
+ state: started
+ enabled: yes
+ masked: no
+ changed_when: false
+
+- name: set_fact rbd_cmd
+ set_fact:
+ rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rbd' }}"
+
+- name: include start_container_rbd_mirror.yml
+ include_tasks: start_container_rbd_mirror.yml
+ when:
+ - containerized_deployment | bool
+ - ceph_rbd_mirror_remote_user is defined
+
+- name: include configure_mirroring.yml
+ include_tasks: configure_mirroring.yml
\ No newline at end of file
--- /dev/null
+---
+# Use systemd to manage container on Atomic host
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: systemd start rbd mirror container
+ systemd:
+ name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
--- /dev/null
+---
+- name: generate systemd unit file
+ template:
+ src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
+ dest: /etc/systemd/system/ceph-rbd-mirror@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph rbdmirrors
+
+- name: generate systemd ceph-rbd-mirror target file
+ copy:
+ src: ceph-rbd-mirror.target
+ dest: /etc/systemd/system/ceph-rbd-mirror.target
+ when: containerized_deployment | bool
+
+- name: enable ceph-rbd-mirror.target
+ service:
+ name: ceph-rbd-mirror.target
+ enabled: yes
+ daemon_reload: yes
+ when: containerized_deployment | bool
\ No newline at end of file
--- /dev/null
+# {{ ansible_managed }}
--- /dev/null
+[Unit]
+Description=Ceph RBD mirror
+PartOf=ceph-rbd-mirror.target
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_rbd_mirror_docker_memory_limit }} \
+ --cpus={{ ceph_rbd_mirror_docker_cpu_limit }} \
+ -v /var/lib/ceph:/var/lib/ceph:z \
+ -v /etc/ceph:/etc/ceph:z \
+ -v /var/run/ceph:/var/run/ceph:z \
+ -v /etc/localtime:/etc/localtime:ro \
+ -v /var/log/ceph:/var/log/ceph:z \
+ -e CLUSTER={{ cluster }} \
+ -e CEPH_DAEMON=RBD_MIRROR \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+ --name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }} \
+ {{ ceph_rbd_mirror_docker_extra_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=ceph.target
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+haproxy_frontend_port: 80
+haproxy_frontend_ssl_port: 443
+haproxy_frontend_ssl_certificate:
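+# haproxy_frontend_ssl_certificate is empty by default. When set, haproxy.cfg.j2
+# binds the SSL frontend with 'ssl crt <path>', so the path should point to a PEM
+# bundle containing the certificate and its private key. Illustrative example only:
+#haproxy_frontend_ssl_certificate: /etc/haproxy/certs/rgw.pem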
+haproxy_ssl_dh_param: 4096
+haproxy_ssl_ciphers:
+ - EECDH+AESGCM
+ - EDH+AESGCM
+haproxy_ssl_options:
+ - no-sslv3
+ - no-tlsv10
+ - no-tlsv11
+ - no-tls-tickets
+#
+#virtual_ips:
+# - 192.168.238.250
+# - 192.168.238.251
+#
+#virtual_ip_netmask: 24
+#virtual_ip_interface: ens33
--- /dev/null
+---
+- name: restart haproxy
+ service:
+ name: haproxy
+ state: restarted
+
+- name: restart keepalived
+ service:
+ name: keepalived
+ state: restarted
--- /dev/null
+---
+galaxy_info:
+ author: Gui Hecheng
+ description: Config HAProxy & Keepalived
+ license: Apache
+ min_ansible_version: 2.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: include_tasks pre_requisite.yml
+ include_tasks: pre_requisite.yml
+
+- name: include_tasks start_rgw_loadbalancer.yml
+ include_tasks: start_rgw_loadbalancer.yml
--- /dev/null
+---
+- name: install haproxy and keepalived
+ package:
+ name: ['haproxy', 'keepalived']
+ state: present
+ register: result
+ until: result is succeeded
+
+- name: "generate haproxy configuration file: haproxy.cfg"
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ validate: "haproxy -f %s -c"
+ notify:
+ - restart haproxy
+
+- name: set_fact vip to vrrp_instance
+ set_fact:
+ vrrp_instances: "{{ vrrp_instances | default([]) | union([{ 'name': 'VI_' + index|string , 'vip': item, 'master': groups[rgwloadbalancer_group_name][index] }]) }}"
+ loop: "{{ virtual_ips | flatten(levels=1) }}"
+ loop_control:
+ index_var: index
+
+- name: "generate keepalived: configuration file: keepalived.conf"
+ template:
+ src: keepalived.conf.j2
+ dest: /etc/keepalived/keepalived.conf
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify:
+ - restart keepalived
+
+- name: selinux related tasks
+ when:
+ - ansible_facts['os_family'] == 'RedHat'
+ - ansible_facts['selinux']['status'] == 'enabled'
+ block:
+ - name: set_fact rgw_ports
+ set_fact:
+ rgw_ports: "{{ rgw_ports | default([]) | union(hostvars[item]['rgw_instances'] | map(attribute='radosgw_frontend_port') | map('string') | list) }}"
+ with_items: "{{ groups.get(rgw_group_name, []) }}"
+
+ - name: add selinux rules
+ seport:
+ ports: "{{ rgw_ports }}"
+ proto: tcp
+ setype: http_port_t
+ state: present
--- /dev/null
+---
+- name: start haproxy
+ service:
+ name: haproxy
+ state: started
+ enabled: yes
+
+- name: start keepalived
+ service:
+ name: keepalived
+ state: started
+ enabled: yes
--- /dev/null
+# {{ ansible_managed }}
+global
+ log 127.0.0.1 local2
+
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 8000
+ user haproxy
+ group haproxy
+ daemon
+ stats socket /var/lib/haproxy/stats
+{% if haproxy_frontend_ssl_certificate %}
+ tune.ssl.default-dh-param {{ haproxy_ssl_dh_param }}
+ ssl-default-bind-ciphers {{ haproxy_ssl_ciphers | join(':') }}
+ ssl-default-bind-options {{ haproxy_ssl_options | join(' ') }}
+{% endif %}
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 8000
+
+frontend rgw-frontend
+{% if haproxy_frontend_ssl_certificate %}
+ bind *:{{ haproxy_frontend_ssl_port }} ssl crt {{ haproxy_frontend_ssl_certificate }}
+{% else %}
+ bind *:{{ haproxy_frontend_port }}
+{% endif %}
+ default_backend rgw-backend
+
+# when running in an SELinux environment, SELinux restricts the ports that haproxy can
+# connect to:
+# * 80, 81, 443, 488, 8008, 8009, 8443, 9000 (http_port_t) and,
+# * 8080, 8118, 8123, 10001-10010 (http_cache_port_t)
+#
+# Practically speaking, it would be preferable (and perhaps easier) to configure the
+# rgw daemons to listen on ports 10001-10010 and configure haproxy here to match.
+#
+# Alternatively you can add other unused ports to http_port_t or http_cache_port_t
+# with, e.g.: `semanage port -a -t http_cache_port_t -p tcp 8085`
+# (Note that ports 8081-8084 are already taken and can't be used for haproxy.)
+#
+backend rgw-backend
+ option forwardfor
+ balance static-rr
+ option httpchk HEAD /
+{% for host in groups[rgw_group_name] %}
+{% for instance in hostvars[host]['rgw_instances'] %}
+ server {{ 'server-' + hostvars[host]['ansible_facts']['hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check
+{% endfor %}
+{% endfor %}
--- /dev/null
+# {{ ansible_managed }}
+! Configuration File for keepalived
+
+global_defs {
+ router_id CEPH_RGW
+}
+
+vrrp_script check_haproxy {
+ script "killall -0 haproxy"
+ weight -20
+ interval 2
+ rise 2
+ fall 2
+}
+
+{% for instance in vrrp_instances %}
+vrrp_instance {{ instance['name'] }} {
+ state {{ 'MASTER' if inventory_hostname == instance['master'] else 'BACKUP' }}
+ priority {{ '100' if inventory_hostname == instance['master'] else '90' }}
+ interface {{ virtual_ip_interface }}
+ virtual_router_id {{ 50 + loop.index }}
+ advert_int 1
+ authentication {
+ auth_type PASS
+ auth_pass 1234
+ }
+ virtual_ipaddress {
+ {{ instance['vip'] }}/{{ virtual_ip_netmask }} dev {{ virtual_ip_interface }}
+ }
+ track_script {
+ check_haproxy
+ }
+}
+
+{% endfor %}
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2014] [Sébastien Han]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-rgw
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though RGW nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+copy_admin_key: false
+
+##########
+# TUNING #
+##########
+
+# To support buckets with a very large number of objects it's
+# important to split them into shards. We suggest about 100K
+# objects per shard as a conservative maximum.
+#rgw_override_bucket_index_max_shards: 16
+
+# Consider setting a quota on buckets so that exceeding this
+# limit will require admin intervention.
+#rgw_bucket_default_quota_max_objects: 1638400 # i.e., 100K * 16
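+# (Assuming "100K" above means 100 * 1024 = 102400 objects per shard, a bucket
+# with 16 shards reaches this quota at 16 * 102400 = 1638400 objects.)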
+
+# Declaring rgw_create_pools will create pools with the given number of pgs,
+# size, and type. The following are some important notes on this automatic
+# pool creation:
+# - The pools and associated pg_num's below are merely examples of pools that
+# could be automatically created when rgws are deployed.
+# - If rgw_create_pools isn't declared and configured, pools are created with
+# the default pg_num of 8 (from osd_pool_default_pg_num).
+# - A pgcalc tool should be used to determine the optimal sizes for
+# the rgw.buckets.data, rgw.buckets.index pools as well as any other
+# pools declared in this dictionary.
+# https://ceph.io/pgcalc is the upstream pgcalc tool
+# https://access.redhat.com/labsinfo/cephpgc is a pgcalc tool offered by
+# Red Hat if you are using RHCS.
+# - The default value of {{ rgw_zone }} is 'default'.
+# - The type must be set as either 'replicated' or 'ec' for
+# each pool.
+# - If a pool's type is 'ec', k and m values must be set via
+# the ec_k and ec_m variables.
+# - The rule_name key can be used to set a specific crush rule (the rule must already exist).
+# If the key isn't set, it falls back to the default replicated_rule.
+# This only works for the replicated pool type, not erasure.
+
+#rgw_create_pools:
+# "{{ rgw_zone }}.rgw.buckets.data":
+# pg_num: 64
+# type: ec
+# ec_profile: myecprofile
+# ec_k: 5
+# ec_m: 3
+# "{{ rgw_zone }}.rgw.buckets.index":
+# pg_num: 16
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.meta":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.log":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.control":
+# pg_num: 8
+# size: 3
+# type: replicated
+# rule_name: foo
+
+
+##########
+# DOCKER #
+##########
+
+# Resource limitation
+# For the full list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# Other options can be passed to the container run command using the 'ceph_rgw_docker_extra_env' variable.
+ceph_rgw_docker_memory_limit: "4096m"
+ceph_rgw_docker_cpu_limit: 8
+#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+#ceph_rgw_docker_cpuset_mems: "0"
+
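+# Whatever is set in ceph_rgw_docker_extra_env is appended verbatim to the
+# container run command in ceph-radosgw.service.j2, e.g. extra '-e KEY=value'
+# environment flags. Illustrative placeholder only:
+#ceph_rgw_docker_extra_env: "-e KEY=value"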
+ceph_rgw_docker_extra_env:
+ceph_config_keys: [] # DON'T TOUCH ME
+rgw_config_keys: "/" # DON'T TOUCH ME
+
+###########
+# SYSTEMD #
+###########
+# ceph_rgw_systemd_overrides will override the systemd settings
+# for the ceph-rgw services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_rgw_systemd_overrides:
+# Service:
+# PrivateDevices: False
--- /dev/null
+[Unit]
+Description=ceph target that allows starting/stopping all ceph-radosgw@.service instances at once
+PartOf=ceph.target
+After=ceph-mon.target
+Before=ceph.target
+Wants=ceph.target ceph-mon.target
+
+[Install]
+WantedBy=multi-user.target ceph.target
\ No newline at end of file
--- /dev/null
+---
+- name: restart rgw
+ service:
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+ state: restarted
+ with_items: "{{ rgw_instances }}"
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Sébastien Han
+ description: Installs Ceph Rados Gateway
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: create rados gateway directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_directories_mode }}"
+ with_items: "{{ rbd_client_admin_socket_path }}"
+
+- name: get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _rgw_keys
+ with_items:
+ - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
+ - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ when:
+ - cephx | bool
+ - item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: copy ceph key(s) if needed
+ copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _rgw_keys.results }}"
+ when:
+ - cephx | bool
+ - item is not skipped
+ - item.item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: copy SSL certificate & key data to certificate path
+ copy:
+ content: "{{ radosgw_frontend_ssl_certificate_data }}"
+ dest: "{{ radosgw_frontend_ssl_certificate }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: 0440
+ when: radosgw_frontend_ssl_certificate | length > 0 and radosgw_frontend_ssl_certificate_data | length > 0
+ notify: restart ceph rgws
--- /dev/null
+---
+- name: include common.yml
+ include_tasks: common.yml
+
+- name: include_tasks pre_requisite.yml
+ include_tasks: pre_requisite.yml
+ when: not containerized_deployment | bool
+
+- name: rgw pool creation tasks
+ include_tasks: rgw_create_pools.yml
+ run_once: true
+ when: rgw_create_pools is defined
+
+- name: include_tasks openstack-keystone.yml
+ include_tasks: openstack-keystone.yml
+ when: radosgw_keystone_ssl | bool
+
+- name: include_tasks start_radosgw.yml
+ include_tasks: start_radosgw.yml
+ when:
+ - not rgw_multisite | bool
+ - not containerized_deployment | bool
+
+- name: include start_docker_rgw.yml
+ include_tasks: start_docker_rgw.yml
+ when:
+ - not rgw_multisite | bool
+ - containerized_deployment | bool
+
+- name: include_tasks multisite/main.yml
+ include_tasks: multisite/main.yml
+ when:
+ - rgw_multisite | bool
+ - not multisite_called_from_handler_role | default(False) | bool
--- /dev/null
+---
+- name: include_tasks multisite
+ include_tasks: multisite/main.yml
\ No newline at end of file
--- /dev/null
+---
+- name: create list zone_users
+ set_fact:
+ zone_users: "{{ zone_users | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'zone': item.rgw_zone, 'system_access_key': item.system_access_key, 'system_secret_key': item.system_secret_key, 'user': item.rgw_zone_user, 'display_name': item.rgw_zone_user_display_name }]) }}"
+ loop: "{{ rgw_instances_all }}"
+ run_once: true
+ when:
+ - item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) | bool
+ - item.rgw_zonegroupmaster | default(hostvars[item.host]['rgw_zonegroupmaster']) | bool
+
+- name: create the zone user(s)
+ radosgw_user:
+ name: "{{ item.user }}"
+ cluster: "{{ cluster }}"
+ display_name: "{{ item.display_name }}"
+ access_key: "{{ item.system_access_key }}"
+ secret_key: "{{ item.system_secret_key }}"
+ realm: "{{ item.realm }}"
+ zonegroup: "{{ item.zonegroup }}"
+ zone: "{{ item.zone }}"
+ system: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zone_users }}"
+ when: zone_users is defined
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
--- /dev/null
+---
+- name: set_fact realms
+ set_fact:
+ realms: '{{ realms | default([]) | union([item.rgw_realm]) }}'
+ run_once: true
+ loop: "{{ rgw_instances_all }}"
+ when: item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) | bool
+
+- name: create list zonegroups
+ set_fact:
+ zonegroups: "{{ zonegroups | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'is_master': item.rgw_zonegroupmaster | default(hostvars[item.host]['rgw_zonegroupmaster']) }]) }}"
+ run_once: true
+ loop: "{{ rgw_instances_all }}"
+ when: item.rgw_zonegroupmaster | default(hostvars[item.host]['rgw_zonegroupmaster']) | bool
+
+- name: create list zones
+ set_fact:
+ zones: "{{ zones | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'zone': item.rgw_zone, 'is_master': item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']), 'system_access_key': item.system_access_key, 'system_secret_key': item.system_secret_key }]) }}"
+ run_once: true
+ loop: "{{ rgw_instances_all }}"
+
+- name: create a list of dicts with each rgw endpoint and its zone
+ set_fact:
+ zone_endpoint_pairs: "{{ zone_endpoint_pairs | default([]) | union([{ 'endpoint': hostvars[item.host]['rgw_multisite_proto'] + '://' + (item.radosgw_address if hostvars[item.host]['rgw_multisite_proto'] == 'http' else hostvars[item.host]['ansible_facts']['fqdn']) + ':' + item.radosgw_frontend_port | string, 'rgw_zone': item.rgw_zone, 'rgw_realm': item.rgw_realm, 'rgw_zonegroup': item.rgw_zonegroup, 'rgw_zonemaster': item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) }]) }}"
+ loop: "{{ rgw_instances_all }}"
+ run_once: true
+
+- name: create a list of zones and all their endpoints
+ set_fact:
+ zone_endpoints_list: "{{ zone_endpoints_list | default([]) | union([{'zone': item.rgw_zone, 'zonegroup': item.rgw_zonegroup, 'realm': item.rgw_realm, 'is_master': item.rgw_zonemaster, 'endpoints': ','.join(zone_endpoint_pairs | selectattr('rgw_zone','match','^'+item.rgw_zone+'$') | selectattr('rgw_realm','match','^'+item.rgw_realm+'$') | selectattr('rgw_zonegroup', 'match','^'+item.rgw_zonegroup+'$') | map(attribute='endpoint'))}]) }}"
+ loop: "{{ zone_endpoint_pairs }}"
+ run_once: true
+
+# Include the tasks depending on the zone type
+- name: include_tasks master.yml
+ include_tasks: master.yml
+
+- name: include_tasks secondary.yml
+ include_tasks: secondary.yml
+ when: deploy_secondary_zones | default(True) | bool
+
+- name: include_tasks start_radosgw.yml
+ include_tasks: ../start_radosgw.yml
+ when:
+ - not containerized_deployment | bool
+
+- name: include_tasks start_docker_rgw.yml
+ include_tasks: ../start_docker_rgw.yml
+ when:
+ - containerized_deployment | bool
--- /dev/null
+---
+- name: create the realm(s)
+ radosgw_realm:
+ name: "{{ item }}"
+ cluster: "{{ cluster }}"
+ default: "{{ true if realms | length == 1 else false }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ realms }}"
+ when: realms is defined
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: create zonegroup(s)
+ radosgw_zonegroup:
+ name: "{{ item.zonegroup }}"
+ cluster: "{{ cluster }}"
+ realm: "{{ item.realm }}"
+ default: "{{ true if zonegroups | length == 1 else false }}"
+ master: "{{ true if item.is_master | bool else false }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zonegroups }}"
+ when: zonegroups is defined
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: create the master zone(s)
+ radosgw_zone:
+ name: "{{ item.zone }}"
+ cluster: "{{ cluster }}"
+ realm: "{{ item.realm }}"
+ zonegroup: "{{ item.zonegroup }}"
+ access_key: "{{ item.system_access_key }}"
+ secret_key: "{{ item.system_secret_key }}"
+ default: "{{ true if zones | length == 1 else false }}"
+ master: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zones }}"
+ when:
+ - zones is defined
+ - item.is_master | bool
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: add endpoints to their zonegroup(s)
+ radosgw_zonegroup:
+ name: "{{ item.zonegroup }}"
+ cluster: "{{ cluster }}"
+ realm: "{{ item.realm }}"
+ endpoints: "{{ item.endpoints.split(',') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zone_endpoints_list }}"
+ when:
+ - zone_endpoints_list is defined
+ - item.is_master | bool
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: add endpoints to their zone(s)
+ radosgw_zone:
+ name: "{{ item.zone }}"
+ cluster: "{{ cluster }}"
+ realm: "{{ item.realm }}"
+ zonegroup: "{{ item.zonegroup }}"
+ endpoints: "{{ item.endpoints.split(',') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zone_endpoints_list }}"
+ when:
+ - zone_endpoints_list is defined
+ - item.is_master | bool
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: update period for zone creation
+ command: "{{ container_exec_cmd }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zone_endpoints_list }}"
+ when:
+ - zone_endpoints_list is defined
+ - item.is_master | bool
+
+- name: include_tasks create_zone_user.yml
+ include_tasks: create_zone_user.yml
--- /dev/null
+---
+- name: create list secondary_realms
+ set_fact:
+ secondary_realms: "{{ secondary_realms | default([]) | union([{ 'realm': item.rgw_realm, 'zonegroup': item.rgw_zonegroup, 'zone': item.rgw_zone, 'endpoint': item.endpoint, 'system_access_key': item.system_access_key, 'system_secret_key': item.system_secret_key, 'is_master': item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) }]) }}"
+ loop: "{{ rgw_instances_all }}"
+ run_once: true
+ when: not item.rgw_zonemaster | default(hostvars[item.host]['rgw_zonemaster']) | bool
+
+- name: ensure connection to primary cluster from mon
+ uri:
+ url: "{{ item.endpoint }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ secondary_realms }}"
+ when: secondary_realms is defined
+
+- name: ensure connection to primary cluster from rgw
+ uri:
+ url: "{{ item.endpoint }}"
+ loop: "{{ rgw_instances }}"
+ when: not item.rgw_zonemaster | default(rgw_zonemaster) | bool
+
+- name: fetch the realm(s)
+ command: "{{ container_exec_cmd }} radosgw-admin realm pull --cluster={{ cluster }} --rgw-realm={{ item.realm }} --url={{ item.endpoint }} --access-key={{ item.system_access_key }} --secret={{ item.system_secret_key }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ secondary_realms }}"
+ when: secondary_realms is defined
+
+- name: get the period(s)
+ command: "{{ container_exec_cmd }} radosgw-admin period get --cluster={{ cluster }} --rgw-realm={{ item.realm }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ secondary_realms }}"
+ when: secondary_realms is defined
+
+- name: create the zone(s)
+ radosgw_zone:
+ name: "{{ item.zone }}"
+ cluster: "{{ cluster }}"
+ realm: "{{ item.realm }}"
+ zonegroup: "{{ item.zonegroup }}"
+ access_key: "{{ item.system_access_key }}"
+ secret_key: "{{ item.system_secret_key }}"
+ default: "{{ true if zones | length == 1 else false }}"
+ master: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zones }}"
+ when:
+ - zones is defined
+ - not item.is_master | bool
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: add endpoints to their zone(s)
+ radosgw_zone:
+ name: "{{ item.zone }}"
+ cluster: "{{ cluster }}"
+ realm: "{{ item.realm }}"
+ zonegroup: "{{ item.zonegroup }}"
+ endpoints: "{{ item.endpoints.split(',') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zone_endpoints_list }}"
+ when:
+ - zone_endpoints_list is defined
+ - not item.is_master | bool
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: update period for zone creation
+ command: "{{ container_exec_cmd }} radosgw-admin --cluster={{ cluster }} --rgw-realm={{ item.realm }} --rgw-zonegroup={{ item.zonegroup }} --rgw-zone={{ item.zone }} period update --commit"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ loop: "{{ zone_endpoints_list }}"
+ when:
+ - zone_endpoints_list is defined
+ - not item.is_master | bool
--- /dev/null
+---
+- name: install nss-tools on redhat
+ package:
+ name: nss-tools
+ state: present
+ register: result
+ until: result is succeeded
+ when: ansible_facts['pkg_mgr'] == 'yum' or ansible_facts['pkg_mgr'] == 'dnf'
+
+- name: install libnss3-tools on debian
+ package:
+ name: libnss3-tools
+ state: present
+ register: result
+ until: result is succeeded
+ when: ansible_facts['pkg_mgr'] == 'apt'
+
+- name: create nss directory for keystone certificates
+ file:
+ path: "{{ radosgw_nss_db_path }}"
+ state: directory
+ owner: root
+ group: root
+ mode: 0644
+
+- name: create nss entries for keystone certificates
+ shell: "{{ item }}"
+ args:
+ warn: no
+ changed_when: false
+ with_items:
+ - "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'"
+ - "openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | certutil -A -d {{ radosgw_nss_db_path }} -n signing_cert -t 'P,P,P'"
+ tags: skip_ansible_lint
--- /dev/null
+---
+- name: create rgw keyrings
+ ceph_key:
+ name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
+ cluster: "{{ cluster }}"
+ user: "client.bootstrap-rgw"
+ user_key: /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
+ dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
+ caps:
+ osd: 'allow rwx'
+ mon: 'allow rw'
+ import_key: False
+ owner: "ceph"
+ group: "ceph"
+ mode: "0600"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ rgw_instances }}"
+ when: cephx | bool
\ No newline at end of file
--- /dev/null
+---
+- name: create ec profile
+ ceph_ec_profile:
+ name: "{{ item.value.ec_profile }}"
+ cluster: "{{ cluster }}"
+ k: "{{ item.value.ec_k }}"
+ m: "{{ item.value.ec_m }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ loop: "{{ rgw_create_pools | dict2items }}"
+ when:
+ - item.value.type is defined
+ - item.value.type == 'ec'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: set crush rule
+ ceph_crush_rule:
+ name: "{{ item.key }}"
+ cluster: "{{ cluster }}"
+ rule_type: erasure
+ profile: "{{ item.value.ec_profile }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ loop: "{{ rgw_create_pools | dict2items }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - item.value.type is defined
+ - item.value.type == 'ec'
+
+- name: create ec pools for rgw
+ ceph_pool:
+ name: "{{ item.key }}"
+ state: present
+ cluster: "{{ cluster }}"
+ pg_num: "{{ item.value.pg_num | default(omit) }}"
+ pgp_num: "{{ item.value.pgp_num | default(omit) }}"
+ size: "{{ item.value.size | default(omit) }}"
+ pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
+ target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
+ pool_type: erasure
+ erasure_profile: "{{ item.value.ec_profile }}"
+ application: rgw
+ loop: "{{ rgw_create_pools | dict2items }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - item.value.type is defined
+ - item.value.type == 'ec'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: create replicated pools for rgw
+ ceph_pool:
+ name: "{{ item.key }}"
+ state: present
+ cluster: "{{ cluster }}"
+ pg_num: "{{ item.value.pg_num | default(omit) }}"
+ pgp_num: "{{ item.value.pgp_num | default(omit) }}"
+ size: "{{ item.value.size | default(omit) }}"
+ min_size: "{{ item.value.min_size | default(omit) }}"
+ pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
+ target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
+ pool_type: replicated
+ rule_name: "{{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
+ application: rgw
+ loop: "{{ rgw_create_pools | dict2items }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: item.value.type is not defined or item.value.type == 'replicated'
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
--- /dev/null
+---
+- name: include_tasks systemd.yml
+ include_tasks: systemd.yml
+
+- name: systemd start rgw container
+ systemd:
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
+ state: started
+ enabled: yes
+ masked: no
+ daemon_reload: yes
+ with_items: "{{ rgw_instances }}"
--- /dev/null
+---
+- name: ensure systemd service override directory exists
+ file:
+ state: directory
+ path: "/etc/systemd/system/ceph-radosgw@.service.d/"
+ when: ceph_rgw_systemd_overrides is defined
+
+- name: add ceph-rgw systemd service overrides
+ config_template:
+ src: "ceph-rgw.service.d-overrides.j2"
+ dest: "/etc/systemd/system/ceph-radosgw@.service.d/ceph-radosgw-systemd-overrides.conf"
+ config_overrides: "{{ ceph_rgw_systemd_overrides | default({}) }}"
+ config_type: "ini"
+ when: ceph_rgw_systemd_overrides is defined
+
+- name: start rgw instance
+ service:
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
+ state: started
+ enabled: yes
+ masked: no
+ with_items: "{{ rgw_instances }}"
+ when:
+ - not rgw_multisite | bool or
+ ((rgw_multisite | bool and item.rgw_zonesecondary | default(rgw_zonesecondary) | bool and deploy_secondary_zones | default(True)) or
+ (rgw_multisite | bool and item.rgw_zonemaster | default(rgw_zonemaster)))
+
+- name: enable the ceph-radosgw.target service
+ systemd:
+ name: ceph-radosgw.target
+ enabled: yes
+ masked: no
--- /dev/null
+---
+- name: generate systemd unit file
+ template:
+ src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
+ dest: /etc/systemd/system/ceph-radosgw@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: restart ceph rgws
+
+- name: generate systemd ceph-radosgw target file
+ copy:
+ src: ceph-radosgw.target
+ dest: /etc/systemd/system/ceph-radosgw.target
+ when: containerized_deployment | bool
+
+- name: enable ceph-radosgw.target
+ service:
+ name: ceph-radosgw.target
+ enabled: yes
+ daemon_reload: yes
+ when: containerized_deployment | bool
\ No newline at end of file
--- /dev/null
+[Unit]
+Description=Ceph RGW
+PartOf=ceph-radosgw.target
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_rgw_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_rgw_docker_cpu_limit|int %}
+
+[Service]
+EnvironmentFile=/var/lib/ceph/radosgw/{{ cluster }}-%i/EnvironmentFile
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph
+{% else %}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+ --pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+ --memory={{ ceph_rgw_docker_memory_limit }} \
+ --cpus={{ cpu_limit }} \
+ --security-opt label=disable \
+ {% if ceph_rgw_docker_cpuset_cpus is defined -%}
+ --cpuset-cpus="{{ ceph_rgw_docker_cpuset_cpus }}" \
+ {% endif -%}
+ {% if ceph_rgw_docker_cpuset_mems is defined -%}
+ --cpuset-mems="{{ ceph_rgw_docker_cpuset_mems }}" \
+ {% endif -%}
+ -v /var/lib/ceph:/var/lib/ceph \
+ -v /etc/ceph:/etc/ceph \
+ -v /var/run/ceph:/var/run/ceph \
+ -v /etc/localtime:/etc/localtime \
+ -v /var/log/ceph:/var/log/ceph \
+ {% if ansible_facts['os_family'] == 'RedHat' -%}
+ -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted \
+ {% endif -%}
+ {% if radosgw_frontend_ssl_certificate -%}
+ -v {{ radosgw_frontend_ssl_certificate }}:{{ radosgw_frontend_ssl_certificate }} \
+ {% endif -%}
+ -e CEPH_DAEMON=RGW \
+ -e CLUSTER={{ cluster }} \
+ -e RGW_NAME={{ ansible_facts['hostname'] }}.${INST_NAME} \
+ -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
+ --name=ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME} \
+ {{ ceph_rgw_docker_extra_env }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=ceph.target
--- /dev/null
+# {{ ansible_managed }}
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Andrew Schoen
+ description: Validates Ceph config options
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: set_fact root_device
+ set_fact:
+ root_device: "{{ ansible_facts['mounts'] | selectattr('mount', 'match', '^/$') | map(attribute='device') | first }}"
+
+- name: lvm_volumes related tasks
+ when:
+ - lvm_volumes is defined
+ - lvm_volumes | length > 0
+ block:
+ - name: resolve devices in lvm_volumes
+ command: "readlink -f {{ item.data }}"
+ changed_when: false
+ register: _lvm_volumes_data_devices
+ with_items: "{{ lvm_volumes }}"
+ when: item.data_vg is undefined
+
+ - name: set_fact lvm_volumes_data_devices
+ set_fact:
+ lvm_volumes_data_devices: "{{ lvm_volumes_data_devices | default([]) + [item.stdout] }}"
+ with_items: "{{ _lvm_volumes_data_devices.results }}"
+ when: item.skipped is undefined
+
+- name: fail if root_device is passed in lvm_volumes or devices
+ fail:
+ msg: "{{ root_device }} found in either lvm_volumes or devices variable"
+ when: root_device in lvm_volumes_data_devices | default([]) or root_device in devices | default([])
+
+- name: check devices are block devices
+ block:
+ - name: get devices information
+ parted:
+ device: "{{ item }}"
+ unit: MiB
+ register: devices_parted
+ failed_when: False
+ with_items:
+ - "{{ devices | default([]) }}"
+ - "{{ dedicated_devices | default([]) }}"
+ - "{{ bluestore_wal_devices | default([]) }}"
+ - "{{ lvm_volumes_data_devices | default([]) }}"
+
+ - name: fail if one of the devices is not a device
+ fail:
+ msg: "{{ item.item }} is not a block special file!"
+ when: item.rc is defined
+ with_items: "{{ devices_parted.results }}"
+
+ - name: fail when gpt header found on osd devices
+ fail:
+        msg: "{{ item.disk.dev }} has a gpt header, please remove it."
+ with_items: "{{ devices_parted.results }}"
+ when:
+ - item.skipped is undefined
+ - item.disk.table == 'gpt'
+ - item.partitions | length == 0
+
+- name: check logical volume in lvm_volumes
+ when: lvm_volumes is defined
+ block:
+ - name: check data logical volume
+ stat:
+ path: "/dev/{{ item.data_vg }}/{{ item.data }}"
+ follow: true
+ register: lvm_volumes_data
+ loop: "{{ lvm_volumes }}"
+ when:
+ - item.data is defined
+ - item.data_vg is defined
+
+    - name: fail if one of the data logical volumes is not a device or doesn't exist
+ fail:
+ msg: "{{ item.item.data_vg }}/{{ item.item.data }} doesn't exist or isn't a block"
+ loop: "{{ lvm_volumes_data.results }}"
+ when:
+ - item.skipped is undefined
+ - not item.stat.exists | bool or not item.stat.isblk | bool
+
+ - name: check bluestore db logical volume
+ stat:
+ path: "/dev/{{ item.db_vg }}/{{ item.db }}"
+ follow: true
+ register: lvm_volumes_db
+ loop: "{{ lvm_volumes }}"
+ when:
+ - osd_objectstore == 'bluestore'
+ - item.db is defined
+ - item.db_vg is defined
+
+    - name: fail if one of the bluestore db logical volumes is not a device or doesn't exist
+ fail:
+ msg: "{{ item.item.db_vg }}/{{ item.item.db }} doesn't exist or isn't a block"
+ loop: "{{ lvm_volumes_db.results }}"
+ when:
+ - item.skipped is undefined
+ - not item.stat.exists | bool or not item.stat.isblk | bool
+
+ - name: check bluestore wal logical volume
+ stat:
+ path: "/dev/{{ item.wal_vg }}/{{ item.wal }}"
+ follow: true
+ register: lvm_volumes_wal
+ loop: "{{ lvm_volumes }}"
+ when:
+ - osd_objectstore == 'bluestore'
+ - item.wal is defined
+ - item.wal_vg is defined
+
+    - name: fail if one of the bluestore wal logical volumes is not a device or doesn't exist
+ fail:
+ msg: "{{ item.item.wal_vg }}/{{ item.item.wal }} doesn't exist or isn't a block"
+ loop: "{{ lvm_volumes_wal.results }}"
+ when:
+ - item.skipped is undefined
+ - not item.stat.exists | bool or not item.stat.isblk | bool
+
+ - name: check filestore journal logical volume
+ stat:
+ path: "/dev/{{ item.journal_vg }}/{{ item.journal }}"
+ follow: true
+ register: lvm_volumes_journal
+ loop: "{{ lvm_volumes }}"
+ when:
+ - osd_objectstore == 'filestore'
+ - item.journal is defined
+ - item.journal_vg is defined
+
+    - name: fail if one of the filestore journal logical volumes is not a device or doesn't exist
+ fail:
+ msg: "{{ item.item.journal_vg }}/{{ item.item.journal }} doesn't exist or isn't a block"
+ loop: "{{ lvm_volumes_journal.results }}"
+ when:
+ - item.skipped is undefined
+ - not item.stat.exists | bool or not item.stat.isblk | bool
--- /dev/null
+---
+- name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
+ when: monitor_interface not in ansible_facts['interfaces']
+
+- name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
+ when: not hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['active']
+
+- name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ monitor_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
+ when:
+ - ip_version == "ipv4"
+ - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv4'] is not defined
+
+- name: "fail if {{ monitor_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ monitor_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
+ when:
+ - ip_version == "ipv6"
+ - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv6'] is not defined
--- /dev/null
+---
+- name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
+ when: radosgw_interface not in ansible_facts['interfaces']
+
+- name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}"
+  when: not hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['active']
+
+- name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ radosgw_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
+ when:
+ - ip_version == "ipv4"
+ - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
+
+- name: "fail if {{ radosgw_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
+ fail:
+ msg: "{{ radosgw_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
+ when:
+ - ip_version == "ipv6"
+ - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv6'] is not defined
--- /dev/null
+---
+- name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
+ fail:
+ msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
+ when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
--- /dev/null
+---
+- name: fail on unsupported distribution for iscsi gateways
+ fail:
+ msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
+ when: ansible_facts['distribution'] not in ['RedHat', 'CentOS', 'Fedora', 'AlmaLinux', 'Rocky']
+
+- name: make sure gateway_ip_list is configured
+ fail:
+ msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
+ when:
+ - gateway_ip_list == '0.0.0.0'
+ - not containerized_deployment | bool
+ - not use_new_ceph_iscsi | bool
+
+- name: make sure gateway_iqn is configured
+ fail:
+    msg: "you must set an iqn for the iSCSI target"
+ when:
+ - gateway_iqn | length == 0
+ - not containerized_deployment | bool
+ - not use_new_ceph_iscsi | bool
+
+- name: fail if unsupported chap configuration
+ fail:
+ msg: "Mixing clients with CHAP enabled and disabled is not supported."
+ with_items: "{{ client_connections }}"
+ when:
+ - item.status is defined
+ - item.status == "present"
+ - item.chap
+ - " '' in client_connections | selectattr('status', 'match', 'present') | map(attribute='chap') | list"
+
+- name: fail on unsupported distribution version for iscsi gateways
+ command: "grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}"
+ register: iscsi_kernel
+ changed_when: false
+ failed_when: iscsi_kernel.rc != 0
+ loop:
+ - CONFIG_TARGET_CORE
+ - CONFIG_TCM_USER2
+ - CONFIG_ISCSI_TARGET
+ when: ansible_facts['distribution'] in ['RedHat', 'CentOS']
--- /dev/null
+---
+- name: fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone)
+ fail:
+ msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True"
+ when:
+ - nfs_obj_gw | bool
+ - groups.get(mon_group_name, []) | length == 0
+ - (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined)
+
+- name: fail on openSUSE Leap 15.x using distro packages
+ fail:
+ msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
+ when:
+ - ceph_origin == 'distro'
+ - ansible_facts['distribution'] == 'openSUSE Leap'
--- /dev/null
+---
+- name: fail if target_size_ratio is not set when pg_autoscale_mode is True
+ fail:
+    msg: "You must set a target_size_ratio value on the following pool: {{ item.name }}."
+ with_items:
+ - "{{ openstack_pools | default([]) }}"
+ - "{{ cephfs_pools | default([]) }}"
+ - "{{ pools | default([]) }}"
+ when:
+ - item.pg_autoscale_mode | default(False) | bool
+ - item.target_size_ratio is undefined
--- /dev/null
+---
+- name: ensure ceph_rbd_mirror_pool is set
+ fail:
+ msg: "ceph_rbd_mirror_pool needs to be provided"
+ when: ceph_rbd_mirror_pool | default("") | length == 0
+
+- name: ensure ceph_rbd_mirror_remote_cluster is set
+ fail:
+ msg: "ceph_rbd_mirror_remote_cluster needs to be provided"
+ when:
+ - ceph_rbd_mirror_remote_cluster | default("") | length == 0
+ - ceph_rbd_mirror_remote_user | default("") | length > 0
\ No newline at end of file
--- /dev/null
+---
+- name: validate ceph_origin
+ fail:
+ msg: "ceph_origin must be either 'repository', 'distro' or 'local'"
+ when: ceph_origin not in ['repository', 'distro', 'local']
+
+- name: validate ceph_repository
+ fail:
+ msg: "ceph_repository must be either 'community', 'rhcs', 'obs', 'dev', 'custom' or 'uca'"
+ when:
+ - ceph_origin == 'repository'
+ - ceph_repository not in ['community', 'rhcs', 'obs', 'dev', 'custom', 'uca']
+
+- name: validate ceph_repository_community
+ fail:
+ msg: "ceph_stable_release must be 'pacific'"
+ when:
+ - ceph_origin == 'repository'
+ - ceph_repository == 'community'
+ - ceph_stable_release not in ['pacific']
--- /dev/null
+---
+- name: fail if rgw_zone is default
+ fail:
+ msg: "rgw_zone cannot be named 'default'"
+ loop: "{{ rgw_instances }}"
+ when: item.rgw_zone is undefined or item.rgw_zone == 'default'
+
+- name: fail if either rgw_zonemaster or rgw_zonesecondary is undefined
+ fail:
+ msg: "rgw_zonemaster and rgw_zonesecondary must be defined"
+ loop: "{{ rgw_instances }}"
+ when: item.rgw_zonemaster | default(rgw_zonemaster) is undefined or item.rgw_zonesecondary | default(rgw_zonesecondary) is undefined
+
+- name: fail if rgw_zonemaster and rgw_zonesecondary are both true
+ fail:
+ msg: "rgw_zonemaster and rgw_zonesecondary cannot both be true"
+ loop: "{{ rgw_instances }}"
+ when:
+ - item.rgw_zonemaster | default(rgw_zonemaster) | bool
+ - item.rgw_zonesecondary | default(rgw_zonesecondary) | bool
+
+- name: fail if rgw_zonegroup is not set
+ fail:
+ msg: "rgw_zonegroup has not been set by the user"
+ loop: "{{ rgw_instances }}"
+ when: item.rgw_zonegroup is undefined
+
+- name: fail if rgw_zone_user is not set
+ fail:
+ msg: "rgw_zone_user has not been set by the user"
+ loop: "{{ rgw_instances }}"
+ when: item.rgw_zone_user is undefined
+
+- name: fail if rgw_zone_user_display_name is not set
+ fail:
+ msg: "rgw_zone_user_display_name has not been set by the user"
+ loop: "{{ rgw_instances }}"
+ when: item.rgw_zone_user_display_name is undefined
+
+- name: fail if rgw_realm is not set
+ fail:
+ msg: "rgw_realm has not been set by the user"
+ loop: "{{ rgw_instances }}"
+ when: item.rgw_realm is undefined
+
+- name: fail if system_access_key is not set
+ fail:
+ msg: "system_access_key has not been set by the user"
+ loop: "{{ rgw_instances }}"
+ when: item.system_access_key is undefined
+
+- name: fail if system_secret_key is not set
+ fail:
+ msg: "system_secret_key has not been set by the user"
+ loop: "{{ rgw_instances }}"
+ when: item.system_secret_key is undefined
+
+- name: fail if endpoint is not set
+ fail:
+ msg: "endpoint has not been set by the user"
+ loop: "{{ rgw_instances }}"
+ when:
+ - item.rgw_zonesecondary | default(rgw_zonesecondary) | bool
+ - rgw_pull_port is undefined and rgw_pullhost is undefined and item.rgw_pull_proto | default(rgw_pull_proto) is undefined
+ - item.endpoint is undefined
+
--- /dev/null
+---
+- name: fail if ec_profile is not set for ec pools
+ fail:
+ msg: "ec_profile must be set for ec pools"
+ loop: "{{ rgw_create_pools | dict2items }}"
+ when:
+ - item.value.type is defined
+ - item.value.type == 'ec'
+ - item.value.ec_profile is undefined
+
+- name: fail if ec_k is not set for ec pools
+ fail:
+ msg: "ec_k must be set for ec pools"
+ loop: "{{ rgw_create_pools | dict2items }}"
+ when:
+ - item.value.type is defined
+ - item.value.type == 'ec'
+ - item.value.ec_k is undefined
+
+- name: fail if ec_m is not set for ec pools
+ fail:
+ msg: "ec_m must be set for ec pools"
+ loop: "{{ rgw_create_pools | dict2items }}"
+ when:
+ - item.value.type is defined
+ - item.value.type == 'ec'
+ - item.value.ec_m is undefined
--- /dev/null
+---
+- name: fail on unsupported ansible version (1.X)
+ fail:
+ msg: "Ansible version must be >= 2.x, please update!"
+ when: ansible_version.major|int < 2
+
+- name: fail on unsupported ansible version
+ fail:
+ msg: "Ansible version must be 2.9!"
+ when: ansible_version.minor|int != 9
+
+- name: fail on unsupported system
+ fail:
+ msg: "System not supported {{ ansible_facts['system'] }}"
+ when: ansible_facts['system'] not in ['Linux']
+
+- name: fail on unsupported architecture
+ fail:
+ msg: "Architecture not supported {{ ansible_facts['architecture'] }}"
+ when: ansible_facts['architecture'] not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
+
+- name: fail on unsupported distribution
+ fail:
+ msg: "Distribution not supported {{ ansible_facts['os_family'] }}"
+ when: ansible_facts['os_family'] not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
+
+- name: fail on unsupported CentOS release
+ fail:
+ msg: "CentOS release {{ ansible_facts['distribution_major_version'] }} not supported with dashboard"
+ when:
+ - ansible_facts['distribution'] == 'CentOS'
+ - ansible_facts['distribution_major_version'] | int == 7
+ - not containerized_deployment | bool
+ - dashboard_enabled | bool
+
+- name: red hat based systems tasks
+ when:
+ - ceph_repository == 'rhcs'
+ - ansible_facts['distribution'] == 'RedHat'
+ block:
+ - name: fail on unsupported distribution for red hat ceph storage
+ fail:
+        msg: "Distribution version {{ ansible_facts['distribution_version'] }} is not supported by Red Hat Ceph Storage, only RHEL >= 8.2 is supported"
+ when: ansible_facts['distribution_version'] is version('8.2', '<')
+
+- name: fail on unsupported distribution for ubuntu cloud archive
+ fail:
+ msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_facts['distribution'] }}"
+ when:
+ - ceph_repository == 'uca'
+ - ansible_facts['distribution'] != 'Ubuntu'
+
+- name: "fail on unsupported SUSE/openSUSE distribution (only 15.x supported)"
+ fail:
+ msg: "Distribution not supported: {{ ansible_facts['distribution'] }} {{ ansible_facts['distribution_major_version'] }}"
+ when:
+ - ansible_facts['distribution'] == 'openSUSE Leap' or ansible_facts['distribution'] == 'SUSE'
+ - ansible_facts['distribution_major_version'] != '15'
+
+- name: fail if systemd is not present
+ fail:
+ msg: "Systemd must be present"
+ when: ansible_facts['service_mgr'] != 'systemd'
--- /dev/null
+---
+- name: include check_system.yml
+ include_tasks: check_system.yml
+
+- name: validate repository variables in non-containerized scenario
+ include_tasks: check_repository.yml
+ when: not containerized_deployment | bool
+
+- name: validate osd_objectstore
+ fail:
+ msg: "osd_objectstore must be either 'bluestore' or 'filestore'"
+ when: osd_objectstore not in ['bluestore', 'filestore']
+
+- name: validate monitor network configuration
+ fail:
+ msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
+ when:
+ - mon_group_name in group_names
+ - monitor_address == 'x.x.x.x'
+ - monitor_address_block == 'subnet'
+ - monitor_interface == 'interface'
+
+- name: validate radosgw network configuration
+ fail:
+ msg: "Either radosgw_address, radosgw_address_block or radosgw_interface must be provided"
+ when:
+ - rgw_group_name in group_names
+ - radosgw_address == 'x.x.x.x'
+ - radosgw_address_block == 'subnet'
+ - radosgw_interface == 'interface'
+
+- name: validate osd nodes
+ when: osd_group_name in group_names
+ block:
+ - name: validate lvm osd scenario
+ fail:
+ msg: 'devices or lvm_volumes must be defined for lvm osd scenario'
+ when:
+ - not osd_auto_discovery | default(false) | bool
+ - devices is undefined
+ - lvm_volumes is undefined
+
+ - name: validate filestore lvm osd scenario
+ fail:
+ msg: 'data and journal keys must be defined in lvm_volumes'
+ when:
+ - osd_objectstore == 'filestore'
+ - not osd_auto_discovery | default(false) | bool
+ - lvm_volumes is defined
+ - lvm_volumes | length > 0
+ - item.data is undefined or item.journal is undefined
+ with_items: '{{ lvm_volumes }}'
+
+ - name: validate bluestore lvm osd scenario
+ fail:
+ msg: 'data key must be defined in lvm_volumes'
+ when:
+ - osd_objectstore == 'bluestore'
+ - not osd_auto_discovery | default(false) | bool
+ - lvm_volumes is defined
+ - lvm_volumes | length > 0
+ - item.data is undefined
+ with_items: '{{ lvm_volumes }}'
+
+- name: debian based systems tasks
+ when: ansible_facts['os_family'] == 'Debian'
+ block:
+ - name: fail if local scenario is enabled on debian
+ fail:
+ msg: "'local' installation scenario not supported on Debian systems"
+ when: ceph_origin == 'local'
+
+ - name: fail if rhcs repository is enabled on debian
+ fail:
+ msg: "RHCS isn't supported anymore on Debian distribution"
+ when:
+ - ceph_origin == 'repository'
+ - ceph_repository == 'rhcs'
+
+# SUSE/openSUSE Leap only supports the following:
+# - ceph_origin == 'distro'
+# - ceph_origin == 'repository' and ceph_repository == 'obs'
+- name: SUSE/openSUSE Leap based system tasks
+ when: ansible_facts['os_family'] == 'Suse'
+ block:
+ - name: Check ceph_origin definition on SUSE/openSUSE Leap
+ fail:
+ msg: "Unsupported installation method origin:{{ ceph_origin }}"
+ when: ceph_origin not in ['distro', 'repository']
+
+ - name: Check ceph_repository definition on SUSE/openSUSE Leap
+ fail:
+        msg: "Unsupported installation method origin:{{ ceph_origin }} repo:{{ ceph_repository }},
+              the only valid combination is ceph_origin == 'repository' and ceph_repository == 'obs'"
+ when:
+ - ceph_origin == 'repository'
+ - ceph_repository != 'obs'
+
+- name: validate ntp daemon type
+ fail:
+ msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd"
+ when:
+ - ntp_service_enabled | bool
+ - ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd']
+
+# Since NTPd can not be installed on Atomic...
+- name: abort if ntp_daemon_type is ntpd on Atomic
+ fail:
+    msg: ntpd cannot be installed on Atomic hosts, please set ntp_daemon_type to chronyd or timesyncd
+ when:
+ - is_atomic | default(False) | bool
+ - ansible_facts['os_family'] == 'RedHat'
+ - ntp_daemon_type == 'ntpd'
+
+- name: warn if journal_size is configured below 5GB
+ debug:
+ msg: "WARNING: journal_size is configured to {{ journal_size }}, which is less than 5GB. This is not recommended and can lead to severe issues."
+ when:
+ - journal_size|int < 5120
+ - osd_objectstore == 'filestore'
+ - osd_group_name in group_names
+
+- name: include check_devices.yml
+ include_tasks: check_devices.yml
+ when:
+ - osd_group_name in group_names
+ - not osd_auto_discovery | default(False) | bool
+
+- name: include check_eth_mon.yml
+ include_tasks: check_eth_mon.yml
+ when:
+ - mon_group_name in group_names
+ - monitor_interface != "dummy"
+ - monitor_address == "x.x.x.x"
+ - monitor_address_block == "subnet"
+
+- name: include check_ipaddr_mon.yml
+ include_tasks: check_ipaddr_mon.yml
+ when:
+ - mon_group_name in group_names
+ - monitor_interface == "interface"
+ - monitor_address == "x.x.x.x"
+ - monitor_address_block != "subnet"
+
+- name: include check_eth_rgw.yml
+ include_tasks: check_eth_rgw.yml
+ when:
+ - rgw_group_name in group_names
+ - radosgw_interface != "dummy"
+ - radosgw_address == "x.x.x.x"
+ - radosgw_address_block == "subnet"
+
+- name: include check_rgw_pools.yml
+ include_tasks: check_rgw_pools.yml
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_create_pools is defined
+
+- name: include check_rgw_multisite.yml
+ include_tasks: check_rgw_multisite.yml
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_multisite | bool
+
+- name: include check_iscsi.yml
+ include_tasks: check_iscsi.yml
+ when: iscsi_gw_group_name in group_names
+
+- name: warn about radosgw_civetweb_num_threads option deprecation
+ debug:
+ msg: "WARNING: radosgw_civetweb_num_threads variable is deprecated. Please use radosgw_thread_pool_size instead"
+ when:
+ - radosgw_frontend_type == 'civetweb'
+ - radosgw_civetweb_num_threads is defined
+
+- name: include check_nfs.yml
+ include_tasks: check_nfs.yml
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
+- name: include check_rbdmirror.yml
+ include_tasks: check_rbdmirror.yml
+ when:
+ - rbdmirror_group_name in group_names
+ - ceph_rbd_mirror_configure | default(false) | bool
+
+- block:
+ - name: fail if monitoring group doesn't exist
+ fail:
+ msg: "you must add a monitoring group and add at least one node."
+ when: groups[monitoring_group_name] is undefined
+
+ - name: fail when monitoring doesn't contain at least one node.
+ fail:
+ msg: "you must add at least one node in the monitoring hosts group"
+ when: groups[monitoring_group_name] | length < 1
+
+ - name: fail when dashboard_admin_password and/or grafana_admin_password are not set
+ fail:
+ msg: "you must set dashboard_admin_password and grafana_admin_password."
+ when:
+ - dashboard_admin_password is undefined
+ or grafana_admin_password is undefined
+ when: dashboard_enabled | bool
+
+- name: validate container registry credentials
+ fail:
+ msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set'
+ when:
+ - ceph_docker_registry_auth | bool
+ - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or
+ (ceph_docker_registry_username | string | length == 0 or ceph_docker_registry_password | string | length == 0)
+
+- name: validate container service and container package
+ fail:
+ msg: 'both container_package_name and container_service_name should be defined'
+ when:
+ - (container_package_name is undefined and container_service_name is defined) or
+ (container_package_name is defined and container_service_name is undefined)
+
+- name: validate openstack_keys key format
+ fail:
+ msg: '{{ item.name }} key format invalid'
+ with_items: '{{ openstack_keys }}'
+ when:
+ - osd_group_name in group_names
+ - openstack_keys is defined
+ - openstack_keys | length > 0
+ - item.key is defined
+ - item.key is not match("^[a-zA-Z0-9+/]{38}==$")
+
+- name: validate clients keys key format
+ fail:
+ msg: '{{ item.name }} key format invalid'
+ with_items: '{{ keys }}'
+ when:
+ - client_group_name in group_names
+ - keys is defined
+ - keys | length > 0
+ - item.key is defined
+ - item.key is not match("^[a-zA-Z0-9+/]{38}==$")
+
+- name: validate openstack_keys caps
+ fail:
+ msg: '{{ item.name }} key has no caps defined'
+ with_items: '{{ openstack_keys }}'
+ when:
+ - osd_group_name in group_names
+ - openstack_keys is defined
+ - openstack_keys | length > 0
+ - item.caps is not defined
+
+- name: validate clients keys caps
+ fail:
+ msg: '{{ item.name }} key has no caps defined'
+ with_items: '{{ keys }}'
+ when:
+ - client_group_name in group_names
+ - keys is defined
+ - keys | length > 0
+ - item.caps is not defined
+
+- name: check virtual_ips is defined
+ fail:
+ msg: "virtual_ips is not defined."
+ when:
+ - rgwloadbalancer_group_name in group_names
+ - groups[rgwloadbalancer_group_name] | length > 0
+ - virtual_ips is not defined
+
+- name: validate virtual_ips length
+ fail:
+    msg: "There are more virtual_ips defined than rgwloadbalancer nodes"
+ when:
+ - rgwloadbalancer_group_name in group_names
+ - (virtual_ips | length) > (groups[rgwloadbalancer_group_name] | length)
--- /dev/null
+---
+# Defines deployment design and assigns role to server groups
+
+- hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - iscsigws
+ - mgrs
+ - monitoring
+
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+
+ vars:
+ delegate_facts_host: True
+
+ pre_tasks:
+ - import_tasks: raw_install_python.yml
+
+ tasks:
+ # pre-tasks for following import -
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+ tags: always
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
+ run_once: true
+ when: delegate_facts_host | bool
+ tags: always
+
+ - import_role:
+ name: ceph-defaults
+ tags: [with_pkg, fetch_container_image]
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-validate
+ - import_role:
+ name: ceph-infra
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-container-engine
+ tags: with_pkg
+ when: (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first)
+ - import_role:
+ name: ceph-container-common
+ tags: fetch_container_image
+ when: (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first)
+
+- hosts: mons
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - name: set ceph monitor install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mon:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mons
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mon
+ - import_role:
+ name: ceph-mgr
+ when: groups.get(mgr_group_name, []) | length == 0
+
+- hosts: mons
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - name: set ceph monitor install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mon:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mgrs
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph manager install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mgr:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mgr
+
+    # post-tasks for preceding imports -
+ - name: set ceph manager install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mgr:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: osds
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ # pre-tasks for upcoming imports -
+ - name: set ceph osd install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_osd:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-osd
+
+ # post-tasks for preceding imports -
+ - name: set ceph osd install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_osd:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mdss
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph mds install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mds:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mds
+
+ # post-tasks for preceding imports -
+ - name: set ceph mds install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mds:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rgws
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph rgw install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-rgw
+
+ # post-tasks for preceding imports -
+ - name: set ceph rgw install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: clients
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tags: 'ceph_client'
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph client install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_client:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-client
+
+ # post-tasks for preceding imports -
+ - name: set ceph client install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_client:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: nfss
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph nfs install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-nfs
+
+    # post-tasks for preceding imports -
+ - name: set ceph nfs install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rbdmirrors
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph rbd mirror install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rbdmirror:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-rbd-mirror
+
+ # post-tasks for preceding imports -
+ - name: set ceph rbd mirror install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rbdmirror:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts:
+ - iscsigws
+ gather_facts: false
+ any_errors_fatal: true
+ become: True
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph iscsi gateway install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_iscsi_gw:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-iscsi-gw
+
+ # post-tasks for preceding imports -
+ post_tasks:
+ - name: set ceph iscsi gw install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_iscsi_gw:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- import_playbook: dashboard.yml
+ when:
+ - dashboard_enabled | bool
+ - groups.get(monitoring_group_name, []) | length > 0
+
+- hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - mgrs
+
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph crash install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_crash:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-crash
+
+ post_tasks:
+ - name: set ceph crash install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_crash:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mons
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+
+ - name: check if podman binary is present
+ stat:
+ path: /usr/bin/podman
+ register: podman_binary
+
+ - name: set_fact container_binary
+ set_fact:
+ container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
+
+ - name: get ceph status from the first monitor
+ command: >
+ {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} -s
+ register: ceph_status
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ - name: "show ceph status for cluster {{ cluster }}"
+ debug:
+ msg: "{{ ceph_status.stdout_lines }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ when: not ceph_status.failed
--- /dev/null
+---
+# Defines deployment design and assigns role to server groups
+
+- hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - mgrs
+ - iscsigws
+ - monitoring
+ - rgwloadbalancers
+
+ gather_facts: false
+ any_errors_fatal: true
+ become: true
+
+ tags: always
+
+ vars:
+ delegate_facts_host: True
+
+ pre_tasks:
+ # If we can't get python2 installed before any module is used we will fail
+ # so just try what we can to get it installed
+
+ - import_tasks: raw_install_python.yml
+
+ - name: gather facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ when:
+ - not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
+
+ - name: gather and delegate facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
+ run_once: true
+ when: delegate_facts_host | bool
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-validate
+ - import_role:
+ name: ceph-infra
+ - import_role:
+ name: ceph-common
+
+- hosts: mons
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph monitor install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mon:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mon
+ - import_role:
+ name: ceph-mgr
+ when: groups.get(mgr_group_name, []) | length == 0
+
+ post_tasks:
+ - name: set ceph monitor install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mon:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mgrs
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph manager install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mgr:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mgr
+
+ post_tasks:
+ - name: set ceph manager install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mgr:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: osds
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph osd install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_osd:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-osd
+
+ post_tasks:
+ - name: set ceph osd install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_osd:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mdss
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph mds install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mds:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mds
+
+ post_tasks:
+ - name: set ceph mds install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mds:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rgws
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph rgw install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-rgw
+
+ post_tasks:
+ - name: set ceph rgw install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: clients
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ tags: 'ceph_client'
+ pre_tasks:
+ - name: set ceph client install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_client:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-client
+
+ post_tasks:
+ - name: set ceph client install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_client:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: nfss
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph nfs install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-nfs
+
+ post_tasks:
+ - name: set ceph nfs install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rbdmirrors
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph rbd mirror install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rbdmirror:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-rbd-mirror
+
+ post_tasks:
+ - name: set ceph rbd mirror install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rbdmirror:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts:
+ - iscsigws
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph iscsi gateway install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_iscsi_gw:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-iscsi-gw
+
+ post_tasks:
+ - name: set ceph iscsi gw install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_iscsi_gw:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts:
+ - rgwloadbalancers
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph rgw loadbalancer install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw_loadbalancer:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-rgw-loadbalancer
+
+ post_tasks:
+ - name: set ceph rgw loadbalancer install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw_loadbalancer:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- import_playbook: dashboard.yml
+ when:
+ - dashboard_enabled | bool
+ - groups.get(monitoring_group_name, []) | length > 0
+
+- hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - rbdmirrors
+ - mgrs
+
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph crash install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_crash:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-crash
+
+ post_tasks:
+ - name: set ceph crash install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_crash:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mons
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - name: get ceph status from the first monitor
+ command: ceph --cluster {{ cluster }} -s
+ register: ceph_status
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+
+ - name: "show ceph status for cluster {{ cluster }}"
+ debug:
+ msg: "{{ ceph_status.stdout_lines }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ run_once: true
+ when:
+ - ceph_status is not skipped
+ - ceph_status is successful
--- /dev/null
+---
+- hosts: localhost
+ become: true
+ tasks:
+ - import_role:
+ name: ceph-common
+ - import_role:
+ name: ceph-mon
+ - import_role:
+ name: ceph-osd
+ - import_role:
+ name: ceph-mds
+ - import_role:
+ name: ceph-rgw
+ - import_role:
+ name: ceph-fetch-keys
--- /dev/null
+Functional tests
+================
+
+These playbooks aim to individually validate each Ceph component.
+Some of them require packages to be installed.
+Ideally you will run these tests from a client machine or from the Ansible server.
--- /dev/null
+Functional Testing
+==================
+The directory structure, files, and tests found in this directory all work
+together to provide:
+
+* a set of machines (or even a single one) that ceph-ansible can run against
+* a "scenario" configuration file in Python that defines which nodes are
+  assigned to which roles and what 'components' they will test
+* tests (in functional/tests/) that will all run unless explicitly skipped,
+  for example when they exercise a distinct feature dependent on the ansible run.
+
+
+Example run
+-----------
+The following is the easiest way to try this out locally. Both Vagrant and
+VirtualBox are required. Ensure that ``py.test`` and ``pytest-xdist`` are
+installed (with pip, ideally in a virtualenv) using the ``requirements.txt`` file in
+the ``tests`` directory::
+
+ pip install -r requirements.txt
+
+Choose a directory in ``tests/functional`` that has 3 files:
+
+* ``Vagrantfile``
+* ``vagrant_variables.yml``
+* A Python ("scenario") file.
+
+For example in: ``tests/functional/ubuntu/16.04/mon/initial_members``::
+
+ tree .
+ .
+ ├── Vagrantfile -> ../../../../../../Vagrantfile
+ ├── scenario.py
+ └── vagrant_variables.yml
+
+ 0 directories, 3 files
+
+You are *required* to run from within that directory; being there is what
+triggers all the preprocessing of complex arguments based on the cluster setup.
+
+Run vagrant first to setup the environment::
+
+ vagrant up --no-provision --provider=virtualbox
+
+Then run ceph-ansible against the hosts with the distinct role (in this case we
+are deploying a monitor using ``initial_members``).
+
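+One way to do that (a sketch only; the inventory file name and the relative
+path to ``site.yml.sample`` depend on the scenario you picked, and
+``vagrant_ssh_config`` is generated from the running VMs)::
+
+    vagrant ssh-config > vagrant_ssh_config
+    ANSIBLE_SSH_ARGS="-F vagrant_ssh_config" \
+        ansible-playbook -i hosts ../../../../../../site.yml.sample
+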
+And finally run ``py.test``::
+
+ py.test -v
--- /dev/null
+import pytest
+import os
+
+
+def str_to_bool(val):
+ try:
+ val = val.lower()
+ except AttributeError:
+ val = str(val).lower()
+ if val == 'true':
+ return True
+ elif val == 'false':
+ return False
+ else:
+ raise ValueError("Invalid input value: %s" % val)
+
+
+@pytest.fixture(scope="module")
+def setup(host):
+ cluster_address = ""
+ osd_ids = []
+ osds = []
+
+ ansible_vars = host.ansible.get_variables()
+ ansible_facts = host.ansible("setup")
+
+ docker = ansible_vars.get("docker")
+ container_binary = ansible_vars.get("container_binary", "")
+ osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
+ group_names = ansible_vars["group_names"]
+
+ ansible_distribution = ansible_facts["ansible_facts"]["ansible_distribution"]
+
+ if ansible_distribution == "CentOS":
+ public_interface = "eth1"
+ cluster_interface = "eth2"
+ else:
+ public_interface = "ens6"
+ cluster_interface = "ens7"
+
+ subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
+ num_mons = len(ansible_vars["groups"].get('mons', []))
+ if osd_auto_discovery:
+ num_osds = 3
+ else:
+ num_osds = len(ansible_vars.get("devices", []))
+ if not num_osds:
+ num_osds = len(ansible_vars.get("lvm_volumes", []))
+ osds_per_device = ansible_vars.get("osds_per_device", 1)
+ num_osds = num_osds * osds_per_device
+
+    # If the number of devices doesn't map to the number of OSDs, allow tests
+    # to define that custom number, defaulting it to the value computed above
+ num_osds = ansible_vars.get('num_osds', num_osds)
+ cluster_name = ansible_vars.get("cluster", "ceph")
+ conf_path = "/etc/ceph/{}.conf".format(cluster_name)
+ if "osds" in group_names:
+ cluster_address = host.interface(cluster_interface).addresses[0]
+ cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+ if cmd.rc == 0:
+ osd_ids = cmd.stdout.rstrip("\n").split("\n")
+ osds = osd_ids
+
+ address = host.interface(public_interface).addresses[0]
+
+ if docker and not container_binary:
+ container_binary = "podman"
+
+ data = dict(
+ cluster_name=cluster_name,
+ subnet=subnet,
+ osd_ids=osd_ids,
+ num_mons=num_mons,
+ num_osds=num_osds,
+ address=address,
+ osds=osds,
+ conf_path=conf_path,
+ public_interface=public_interface,
+ cluster_interface=cluster_interface,
+ cluster_address=cluster_address,
+ container_binary=container_binary)
+
+ return data
+
+
+@pytest.fixture()
+def node(host, request):
+ """
+ This fixture represents a single node in the ceph cluster. Using the
+ host.ansible fixture provided by testinfra it can access all the ansible
+    variables provided to it by the specific test scenario being run.
+
+    You must include this fixture on any tests that operate on a specific type
+    of node, because it contains the logic to manage which tests a node
+ should run.
+ """
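+    # Illustrative (hypothetical) usage in a test module; the ``mons`` marker
+    # restricts a test to monitor nodes (markers are matched against the
+    # node's group_names here, and path-based markers are added automatically
+    # in pytest_collection_modifyitems below):
+    #
+    #   @pytest.mark.mons
+    #   def test_ceph_conf_is_present(node, host):
+    #       cluster = node["vars"].get("cluster", "ceph")
+    #       assert host.file("/etc/ceph/%s.conf" % cluster).exists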
+ ansible_vars = host.ansible.get_variables()
+ # tox will pass in this environment variable. we need to do it this way
+ # because testinfra does not collect and provide ansible config passed in
+ # from using --extra-vars
+ ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "pacific")
+    rolling_update = str_to_bool(os.environ.get("ROLLING_UPDATE", "False"))
+ group_names = ansible_vars["group_names"]
+ docker = ansible_vars.get("docker")
+ dashboard = ansible_vars.get("dashboard_enabled", True)
+ radosgw_num_instances = ansible_vars.get("radosgw_num_instances", 1)
+ ceph_rbd_mirror_remote_user = ansible_vars.get('ceph_rbd_mirror_remote_user', '')
+ ceph_release_num = {
+ 'jewel': 10,
+ 'kraken': 11,
+ 'luminous': 12,
+ 'mimic': 13,
+ 'nautilus': 14,
+ 'octopus': 15,
+ 'pacific': 16,
+ 'dev': 99
+ }
+
+ # capture the initial/default state
+ test_is_applicable = False
+ for marker in request.node.iter_markers():
+ if marker.name in group_names or marker.name == 'all':
+ test_is_applicable = True
+ break
+ # Check if any markers on the test method exist in the nodes group_names.
+ # If they do not, this test is not valid for the node being tested.
+ if not test_is_applicable:
+ reason = "%s: Not a valid test for node type: %s" % (
+ request.function, group_names)
+ pytest.skip(reason)
+
+ if request.node.get_closest_marker('rbdmirror_secondary') and not ceph_rbd_mirror_remote_user: # noqa E501
+ pytest.skip('Not a valid test for a non-secondary rbd-mirror node')
+
+ if request.node.get_closest_marker('ceph_crash') and group_names in [['nfss'], ['iscsigws'], ['clients'], ['monitoring']]:
+        pytest.skip('Not a valid test for nfs, iscsigw, client or monitoring nodes')
+
+ if request.node.get_closest_marker("no_docker") and docker:
+ pytest.skip(
+ "Not a valid test for containerized deployments or atomic hosts")
+
+ if request.node.get_closest_marker("docker") and not docker:
+ pytest.skip(
+ "Not a valid test for non-containerized deployments or atomic hosts") # noqa E501
+
+ if request.node.get_closest_marker("dashboard") and not dashboard:
+ pytest.skip(
+ "Not a valid test with dashboard disabled")
+
+ if request.node.get_closest_marker("dashboard") and group_names == ['clients']:
+ pytest.skip('Not a valid test for client node')
+
+ if request.node.get_closest_marker("no_rolling_update") and rolling_update:
+ pytest.skip('Not a valid test when testing rolling_update')
+
+ data = dict(
+ vars=ansible_vars,
+ docker=docker,
+ ceph_stable_release=ceph_stable_release,
+ ceph_release_num=ceph_release_num,
+ rolling_update=rolling_update,
+ radosgw_num_instances=radosgw_num_instances,
+ )
+ return data
+
+
+def pytest_collection_modifyitems(session, config, items):
+ for item in items:
+ test_path = item.location[0]
+ if "mon" in test_path:
+ item.add_marker(pytest.mark.mons)
+ elif "osd" in test_path:
+ item.add_marker(pytest.mark.osds)
+ elif "mds" in test_path:
+ item.add_marker(pytest.mark.mdss)
+ elif "mgr" in test_path:
+ item.add_marker(pytest.mark.mgrs)
+ elif "rbd-mirror" in test_path:
+ item.add_marker(pytest.mark.rbdmirrors)
+ elif "rgw" in test_path:
+ item.add_marker(pytest.mark.rgws)
+ elif "nfs" in test_path:
+ item.add_marker(pytest.mark.nfss)
+ elif "iscsi" in test_path:
+ item.add_marker(pytest.mark.iscsigws)
+ elif "grafana" in test_path:
+ item.add_marker(pytest.mark.grafanas)
+ else:
+ item.add_marker(pytest.mark.all)
+
+ if "journal_collocation" in test_path:
+ item.add_marker(pytest.mark.journal_collocation)
--- /dev/null
+ubuntu-key/
+fetch/
+vagrant_ssh_config
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+docker: True
+ceph_origin: repository
+ceph_repository: community
+containerized_deployment: true
+cluster: ceph
+public_network: "192.168.63.0/24"
+cluster_network: "192.168.64.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mdss]
+mds0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 1
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.63
+cluster_subnet: 192.168.64
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.61.0/24"
+cluster_network: "192.168.62.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mdss]
+mds0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 1
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.61
+cluster_subnet: 192.168.62
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+docker: True
+ceph_origin: repository
+ceph_repository: community
+containerized_deployment: true
+cluster: ceph
+public_network: "192.168.75.0/24"
+cluster_network: "192.168.76.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mgr0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 1
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.75
+cluster_subnet: 192.168.76
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: dev
+cluster: ceph
+public_network: "192.168.73.0/24"
+cluster_network: "192.168.74.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mgr0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 1
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.73
+cluster_subnet: 192.168.74
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+docker: True
+ceph_origin: repository
+ceph_repository: community
+containerized_deployment: true
+cluster: ceph
+public_network: "192.168.55.0/24"
+cluster_network: "192.168.56.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+mon1
+
+[osds]
+osd0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 2
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.55
+cluster_subnet: 192.168.56
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: dev
+cluster: ceph
+public_network: "192.168.53.0/24"
+cluster_network: "192.168.54.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+mon1
+
+[osds]
+osd0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 2
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.53
+cluster_subnet: 192.168.54
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+docker: True
+ceph_origin: repository
+ceph_repository: community
+containerized_deployment: true
+cluster: ceph
+public_network: "192.168.55.0/24"
+cluster_network: "192.168.56.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+osd1
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.55
+cluster_subnet: 192.168.56
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.53.0/24"
+cluster_network: "192.168.54.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+osd1
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.53
+cluster_subnet: 192.168.54
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+docker: True
+ceph_origin: repository
+ceph_repository: community
+containerized_deployment: true
+cluster: ceph
+public_network: "192.168.67.0/24"
+cluster_network: "192.168.68.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+rbd-mirror0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.67
+cluster_subnet: 192.168.68
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.65.0/24"
+cluster_network: "192.168.66.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+rbd-mirror0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.65
+cluster_subnet: 192.168.66
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+docker: True
+ceph_origin: repository
+ceph_repository: community
+containerized_deployment: true
+cluster: ceph
+public_network: "192.168.71.0/24"
+cluster_network: "192.168.72.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+rgw0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.71
+cluster_subnet: 192.168.72
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.69.0/24"
+cluster_network: "192.168.70.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+copy_admin_key: true
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+rgw0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.69
+cluster_subnet: 192.168.70
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_num_instances: 2
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.19.0/24"
+cluster_network: "192.168.20.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+openstack_config: True
+dashboard_enabled: false
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
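+# rgw_create_pools exercises both replicated and erasure-coded pool creation;
+# for the 'ec' pool, each object is split into ec_k data chunks plus ec_m coding chunks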
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+ ec:
+ pg_num: 16
+ type: ec
+ ec_profile: myecprofile
+ ec_k: 2
+ ec_m: 1
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+osd0
+osd1
+osd2
+
+[mgrs]
+osd0
+osd1
+osd2
+
+[osds]
+osd0
+osd1
+osd2
+
+[mdss]
+osd0
+
+[rgws]
+osd0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 0
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.19
+cluster_subnet: 192.168.20
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+# client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+containerized_deployment: False
+ceph_origin: repository
+ceph_repository: community
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+openstack_config: True
+dashboard_enabled: False
+public_network: "192.168.17.0/24"
+cluster_network: "192.168.18.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+ ec:
+ pg_num: 16
+ type: ec
+ ec_profile: myecprofile
+ ec_k: 2
+ ec_m: 1
\ No newline at end of file
--- /dev/null
+[mons]
+osd0
+osd1
+osd2
+
+[mgrs]
+osd0
+osd1
+osd2
+
+[osds]
+osd0
+osd1
+osd2
+
+[mdss]
+osd0
+
+[rgws]
+osd0
--- /dev/null
+---
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 0
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.17
+cluster_subnet: 192.168.18
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+# client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+{
+ "ceph_conf_overrides": {
+ "global": {
+ "osd_pool_default_pg_num": 12,
+ "osd_pool_default_size": 1,
+ "mon_allow_pool_size_one": true,
+ "mon_warn_on_pool_no_redundancy": false,
+ "mon_max_pg_per_osd": 300
+ }
+ },
+ "cephfs_pools": [
+ {
+ "name": "cephfs_data",
+ "pg_num": 8,
+ "pgp_num": 8,
+ "rule_name": "replicated_rule",
+ "type": 1,
+ "erasure_profile": "",
+ "expected_num_objects": "",
+ "application": "cephfs",
+ "size": 2,
+ "min_size": 0
+ },
+ {
+ "name": "cephfs_metadata",
+ "pg_num": 8,
+ "pgp_num": 8,
+ "rule_name": "replicated_rule",
+ "type": 1,
+ "erasure_profile": "",
+ "expected_num_objects": "",
+ "application": "cephfs",
+ "size": 2,
+ "min_size": 0
+ }
+ ],
+ "ceph_mon_docker_memory_limit": "2g",
+ "radosgw_num_instances": 2
+}
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.17.0/24"
+cluster_network: "192.168.18.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+openstack_config: True
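+# target_size_ratio is a pg_autoscaler hint: the expected share of cluster capacity for the pool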
+openstack_glance_pool:
+ name: "images"
+ size: 1
+ target_size_ratio: 0.2
+openstack_cinder_pool:
+ name: "volumes"
+ rule_name: "HDD"
+ size: 1
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
+docker_pull_timeout: 600s
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
--- /dev/null
+---
+user_config: True
+copy_admin_key: True
+test:
+ name: "test"
+ rule_name: "HDD"
+ size: 1
+test2:
+ name: "test2"
+ size: 1
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+generate_crt: True
--- /dev/null
+---
+create_crush_tree: True
+crush_rule_config: True
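+# defines a CRUSH rule named HDD that places replicas on distinct hosts using
+# hdd-class devices under the 'default' root, and makes it the cluster default rule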
+crush_rule_hdd:
+ name: HDD
+ root: default
+ type: host
+ class: hdd
+ default: true
+crush_rules:
+ - "{{ crush_rule_hdd }}"
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
--- /dev/null
+[mons]
+mon0 monitor_address=192.168.17.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.17.12
+
+[mgrs]
+mgr0
+
+[osds]
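+# per-host CRUSH placement overrides; osd2 additionally replaces the LVM setup
+# with raw devices/dedicated_devices (lvm_volumes is cleared for that host)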
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
+
+[mdss]
+mds0
+mds1
+mds2
+
+[rgws]
+rgw0
+
+[nfss]
+nfs0
+
+[clients]
+client0
+client1
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[monitoring]
+mon0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 3
+rgw_vms: 1
+nfs_vms: 1
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 2
+iscsi_gw_vms: 1
+mgr_vms: 1
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.17
+cluster_subnet: 192.168.18
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ size: 1
+ application: rbd
+ target_size_ratio: 0.2
+openstack_cinder_pool:
+ name: "volumes"
+ rule_name: "HDD"
+ size: 1
+ application: rbd
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_server_group_name: ceph_monitoring
--- /dev/null
+---
+copy_admin_key: True
+user_config: True
+test:
+ name: "test"
+ rule_name: "HDD"
+ size: 1
+test2:
+ name: "test2"
+ size: 1
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+generate_crt: True
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: True
+crush_rule_config: True
+crush_rule_hdd:
+ name: HDD
+ root: default
+ type: host
+ class: hdd
+ default: true
+crush_rules:
+ - "{{ crush_rule_hdd }}"
\ No newline at end of file
--- /dev/null
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+ CACHEINODE {
+ Entries_HWMark = 100000;
+ }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_master"
+nfs_ganesha_stable_branch: "V3.5-stable"
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+copy_admin_key: true
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+[mons]
+mon0 monitor_address=192.168.1.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.1.12
+
+[mgrs]
+mgr0
+
+[osds]
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
+
+[mdss]
+mds0
+mds1
+mds2
+
+[rgws]
+rgw0
+
+[clients]
+client0
+client1
+
+[nfss]
+nfs0
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[ceph_monitoring]
+mon0
--- /dev/null
+[all:vars]
+docker=True
+
+[mons]
+mon0 monitor_address=192.168.1.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.1.12
+
+[mgrs]
+mgr0
+
+[osds]
+osd0
+
+[mdss]
+mds0
+mds1
+mds2
+
+[rgws]
+rgw0
+
+[clients]
+client0
+
+[monitoring]
+mon0
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 3
+rgw_vms: 1
+nfs_vms: 1
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 2
+iscsi_gw_vms: 1
+mgr_vms: 1
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name, need to match the hostname
+# label_prefix: ceph
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+monitor_interface: eth1
+public_network: "192.168.30.0/24"
+cluster_network: "192.168.31.0/24"
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon-base
+ceph_docker_image_tag: latest-pacific-devel
+containerized_deployment: true
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[mgrs]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
+osd1
+
+[mdss]
+mds0
+
+[rgws]
+rgw0
+
+[nfss]
+nfs0
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[monitoring]
+mon0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 2
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 1
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 0
+iscsi_gw_vms: 1
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.30
+cluster_subnet: 192.168.31
+
+# MEMORY
+# set 1024 for CentOS
+memory: 2048
+
+vagrant_box: centos/stream8
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
--- /dev/null
+---
+- hosts: all
+ become: yes
+ tasks:
+ - name: import_role ceph-defaults
+ import_role:
+ name: ceph-defaults
+
+ - name: import_role ceph-facts
+ import_role:
+ name: ceph-facts
+ tasks_from: container_binary.yml
+
+    - name: import_role ceph-facts (set_radosgw_address)
+      import_role:
+        name: ceph-facts
+        tasks_from: set_radosgw_address.yml
+
+ - name: set_fact ceph_cmd
+ set_fact:
+ ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
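+    # For reference (illustrative only, not required by the play): in a
+    # containerized deployment, assuming podman as the container binary and the
+    # quay.ceph.io/ceph-ci/daemon:latest-pacific image used elsewhere in these
+    # scenarios, ceph_cmd resolves to something like:
+    #   podman run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph quay.ceph.io/ceph-ci/daemon:latest-pacific
+    # otherwise it is simply 'ceph'.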
+
+ - name: get some ceph status outputs
+ command: "{{ ceph_cmd }} --connect-timeout 10 --cluster {{ cluster }} {{ item }}"
+ register: ceph_status
+ run_once: True
+ delegate_to: mon0
+ failed_when: false
+ changed_when: false
+ with_items:
+ - "-s -f json-pretty"
+ - "osd tree"
+ - "osd dump"
+ - "pg dump"
+ - "versions"
+ - "health detail -f json-pretty"
+
+ - name: save ceph status to file
+ copy:
+ content: "{{ item.stdout }}"
+ dest: "{{ archive_path }}/{{ item.item | regex_replace(' ', '_') }}.log"
+ delegate_to: localhost
+ run_once: True
+ with_items: "{{ ceph_status.results }}"
+
+ - name: get mgr log
+ shell: journalctl -l -u ceph-mgr@{{ ansible_facts['hostname'] }} > /var/log/ceph/ceph-mgr.{{ ansible_facts['hostname'] }}.log
+ changed_when: false
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+ or
+ (groups.get(mgr_group_name, []) | length == 0 and inventory_hostname in groups.get(mon_group_name, []))
+
+ - name: get rgw log
+ shell: journalctl -l -u ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} > /var/log/ceph/ceph-radosgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}.log
+ changed_when: false
+ with_items: "{{ rgw_instances | default([]) }}"
+ when: inventory_hostname in groups.get(rgw_group_name, [])
+
+ - name: find ceph config file and logs
+ find:
+ paths:
+ - /etc/ceph
+ - /var/log/ceph
+ patterns:
+ - "*.conf"
+ - "*.log"
+ register: results
+
+ - name: collect ceph config file and logs
+ fetch:
+ src: "{{ item.path }}"
+ dest: "{{ archive_path }}/{{ inventory_hostname }}/"
+ flat: yes
+ with_items: "{{ results.files }}"
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_num_instances: 2
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.15.0/24"
+cluster_network: "192.168.16.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+dashboard_admin_user_ro: true
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
--- /dev/null
+---
+user_config: True
+test:
+ name: "test"
+ rule_name: "HDD"
+test2:
+ name: "test2"
+ rule_name: "HDD"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
+osd1
+
+[mdss]
+mds0
+rgw0
+
+[rgws]
+rgw0
+mds0
+
+[rbdmirrors]
+rgw0
+mds0
+
+#[nfss]
+#rgw0
+#mds0
+
+[monitoring]
+mon0
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 2
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.15
+cluster_subnet: 192.168.16
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+# client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+containerized_deployment: False
+ceph_origin: repository
+ceph_repository: community
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.15.0/24"
+cluster_network: "192.168.16.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+dashboard_admin_user_ro: true
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
\ No newline at end of file
--- /dev/null
+---
+user_config: True
+test:
+ name: "test"
+ rule_name: "HDD"
+test2:
+ name: "test2"
+ rule_name: "HDD"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
+osd1
+
+[mdss]
+mds0
+rgw0
+
+[rgws]
+osd0
+rgw0
+mds0
+
+[rbdmirrors]
+rgw0
+mds0
+
+#[nfss]
+#rgw0
+#mds0
+
+[monitoring]
+mon0
\ No newline at end of file
--- /dev/null
+---
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 2
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.15
+cluster_subnet: 192.168.16
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+# client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+- hosts: localhost
+ gather_facts: false
+ become: no
+ tags: vagrant_setup
+ tasks:
+
+
+ - block:
+ - name: set_fact group_vars_path
+ set_fact:
+ group_vars_path: "{{ change_dir + '/inventory/group_vars' if 'external_clients' in change_dir.split('/') else change_dir + '/group_vars' }}"
+
+ - block:
+ - name: change ceph_repository to 'dev'
+ replace:
+ regexp: "ceph_repository:.*"
+ replace: "ceph_repository: dev"
+ dest: "{{ group_vars_path }}/all"
+
+ - block:
+ - name: ensure nfs_ganesha_stable is set to False
+ replace:
+ regexp: "nfs_ganesha_stable:.*"
+ replace: "nfs_ganesha_stable: false"
+ dest: "{{ group_vars_path }}/nfss"
+
+ - name: ensure nfs_ganesha_dev is set to True
+ replace:
+ regexp: "nfs_ganesha_dev:.*"
+ replace: "nfs_ganesha_dev: true"
+ dest: "{{ group_vars_path }}/nfss"
+ when:
+ - setup_nfs_dev_repo | default(True) | bool
+ - "'all_daemons' in group_vars_path.split('/')"
+ when: change_dir is defined
+
+ - name: print contents of {{ group_vars_path }}/all
+ command: "cat {{ group_vars_path }}/all"
+ when: dev_setup
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+{
+ "ceph_conf_overrides": {
+ "global": {
+ "osd_pool_default_pg_num": 12,
+ "osd_pool_default_size": 1,
+ "mon_allow_pool_size_one": true,
+ "mon_warn_on_pool_no_redundancy": false
+ }
+ },
+ "cephfs_pools": [
+ {
+ "name": "cephfs_data",
+ "pg_num": 8,
+ "pgp_num": 8,
+ "rule_name": "replicated_rule",
+ "type": 1,
+ "erasure_profile": "",
+ "expected_num_objects": "",
+ "application": "cephfs",
+ "size": 3,
+ "min_size": 0
+ },
+ {
+ "name": "cephfs_metadata",
+ "pg_num": 8,
+ "pgp_num": 8,
+ "rule_name": "replicated_rule",
+ "type": 1,
+ "erasure_profile": "",
+ "expected_num_objects": "",
+ "application": "cephfs",
+ "size": 3,
+ "min_size": 0
+ }
+ ],
+ "ceph_mon_docker_memory_limit": "2g"
+}
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+container_binary: docker
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.58.0/24"
+cluster_network: "192.168.59.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: False
+openstack_glance_pool:
+ name: "images"
+ rule_name: "HDD"
+ size: 1
+openstack_cinder_pool:
+ name: "volumes"
+ rule_name: "HDD"
+ size: 1
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
--- /dev/null
+---
+user_config: True
+copy_admin_key: True
+test:
+ name: "test"
+ rule_name: "HDD"
+test2:
+ name: "test2"
+ rule_name: "HDD"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+generate_crt: True
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
+crush_rule_hdd:
+ name: HDD
+ root: default
+ type: host
+ class: hdd
+ default: true
+crush_rules:
+ - "{{ crush_rule_hdd }}"
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ bar:
+ pg_num: 16
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
+
+[monitoring]
+mon0
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.58
+cluster_subnet: 192.168.59
+
+# MEMORY
+# set 1024 for CentOS
+memory: 2048
+
+vagrant_box: centos/7
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+[clients]
+client0
+client1
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_num_instances: 2
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.31.0/24"
+cluster_network: "192.168.32.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+openstack_config: True
+dashboard_enabled: false
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ bar:
+ pg_num: 16
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
+generate_fsid: false
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+user_config: True
+test:
+ name: "test"
+test2:
+ name: "test2"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[mgrs]
+mon0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 0
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 2
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.31
+cluster_subnet: 192.168.32
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+# client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+[clients]
+client0
+client1
\ No newline at end of file
--- /dev/null
+---
+containerized_deployment: False
+ceph_origin: repository
+ceph_repository: community
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+openstack_config: True
+dashboard_enabled: False
+public_network: "192.168.31.0/24"
+cluster_network: "192.168.32.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ bar:
+ pg_num: 16
+fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
+generate_fsid: false
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+user_config: True
+test:
+ name: "test"
+test2:
+ name: "test2"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[mgrs]
+mon0
--- /dev/null
+---
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 0
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 2
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.31
+cluster_subnet: 192.168.32
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+# client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+- hosts: clients
+ gather_facts: false
+ become: yes
+ tasks:
+
+ - name: get keys from monitors
+ command: "{{ 'podman exec ceph-mon-mon0' if containerized_deployment | bool else '' }} ceph --cluster ceph auth get client.admin"
+ register: _key
+ delegate_to: "{{ groups.get('mons')[0] }}"
+ run_once: true
+
+ - name: create /etc/ceph
+ file:
+ path: /etc/ceph
+ state: directory
+ owner: 167
+ group: 167
+ mode: "0755"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "/etc/ceph/ceph.client.admin.keyring"
+ content: "{{ _key.stdout + '\n' }}"
+ owner: 167
+ group: 167
+ mode: "0600"
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.43.0/24"
+cluster_network: "192.168.44.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 2048
+copy_admin_key: true
+containerized_deployment: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
+osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
+osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
+osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
+osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=2048
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 6
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.43
+cluster_subnet: 192.168.44
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.41.0/24"
+cluster_network: "192.168.42.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 2048
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
+osd2 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
+osd3 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]"
+osd4 osd_objectstore=filestore lvm_volumes="[{'data': '/dev/sda', 'journal': '/dev/sdc1'},{'data': '/dev/sdb', 'journal': '/dev/sdc2'}]" dmcrypt=true
+osd5 osd_objectstore=filestore osd_auto_discovery=true journal_size=2048
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 6
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.41
+cluster_subnet: 192.168.42
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+
+logfile_path: ./lv-create.log
+# Path of the NVMe device primed for LV creation for journals and data. Only one NVMe device is allowed at a time; providing a list will not work.
+nvme_device: /dev/sdb
+
+# Paths of the HDD devices designated for LV creation.
+hdd_devices:
+ - /dev/sdc
+
+journal_size: 1024
+
+# This var is a list of bucket index LVs created on the NVMe device. We recommend creating one, but you can add others.
+nvme_device_lvs:
+ - lv_name: "ceph-bucket-index-1"
+ size: 100%FREE
+ journal_name: "ceph-journal-bucket-index-1-{{ nvme_device_basename }}"
+
+## TYPICAL USERS WILL NOT NEED TO CHANGE VARS FROM HERE DOWN ##
+
+# All HDDs have to be the same size, and the LVs on them are dedicated to OSD data.
+hdd_lv_size: 100%FREE
+
+# Since this playbook can be run multiple times across different devices, {{ var.split('/')[-1] }} is used frequently in this playbook.
+# It strips the device name from its path (ex: sdc from /dev/sdc) to differentiate the names of VGs, journals, or LVs if the prefixes are not changed across multiple runs.
+nvme_device_basename: "{{ nvme_device.split('/')[-1] }}"
+
+# Only one volume group is created in the playbook for all the LVs on NVMe. This volume group takes up the entire device specified in "nvme_device".
+nvme_vg_name: "ceph-nvme-vg-{{ nvme_device_basename }}"
+
+hdd_vg_prefix: "ceph-hdd-vg"
+hdd_lv_prefix: "ceph-hdd-lv"
+hdd_journal_prefix: "ceph-journal"
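+
+# For illustration only (these comments are not read by the playbook): with
+# nvme_device: /dev/nvme0n1, the split('/')[-1] filter yields "nvme0n1", so
+# nvme_device_basename resolves to "nvme0n1" and nvme_vg_name to
+# "ceph-nvme-vg-nvme0n1"; the hdd_* prefixes are presumably combined with each
+# HDD's basename in the same way.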
--- /dev/null
+[osds]
+osd0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 0
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+ceph_origin: repository
+ceph_repository: dev
+cluster: ceph
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+crush_device_class: test
+copy_admin_key: true
+osd_auto_discovery: true
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 2048
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+osd_objectstore: "bluestore"
+crush_device_class: test
+copy_admin_key: true
+osd_auto_discovery: true
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 2048
+osd_objectstore: "bluestore"
+crush_device_class: test
+copy_admin_key: true
+devices:
+ - /dev/sdb
+ - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00003
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+osd1 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+osd_objectstore: "bluestore"
+crush_device_class: test
+copy_admin_key: true
+devices:
+ - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002
+ - /dev/sdc
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+osd1 osd_objectstore=filestore devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" journal_size=2048
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.33.0/24"
+cluster_network: "192.168.34.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+copy_admin_key: true
+containerized_deployment: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ type: 3
+ size: 1
+ application: rbd
+ target_size_ratio: 0.2
+openstack_cinder_pool:
+ name: "volumes"
+ size: 1
+ application: rbd
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
+osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
+osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 4
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.33
+cluster_subnet: 192.168.34
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.39.0/24"
+cluster_network: "192.168.40.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ type: 3
+ size: 1
+ application: rbd
+ target_size_ratio: 0.2
+openstack_cinder_pool:
+ name: "volumes"
+ size: 1
+ application: rbd
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
+osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
+osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 4
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.39
+cluster_subnet: 192.168.40
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+- hosts: all
+ gather_facts: false
+ become: yes
+ tasks:
+ - import_tasks: ../../raw_install_python.yml
+
+- hosts: osds
+ gather_facts: false
+ become: yes
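+ # The tasks below build the LVM layout that the lvm_volumes test scenarios
+ # expect: a 'test_group' VG (data-lv1, data-lv2) on the first device and a
+ # 'journals' VG (journal1) on the second partition of the second device,
+ # whose first partition is left raw for use as a filestore journal.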
+ tasks:
+ - name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+ tags: always
+
+ - name: set_fact is_atomic
+ set_fact:
+ is_atomic: '{{ stat_ostree.stat.exists }}'
+ tags: always
+
+ # Some images may not have lvm2 installed
+ - name: install lvm2
+ package:
+ name: lvm2
+ state: present
+ register: result
+ until: result is succeeded
+ when: not is_atomic | bool
+
+ - name: create volume group
+ lvg:
+ vg: test_group
+ pvs: "{{ pv_devices[0] | default('/dev/sdb') }}"
+
+ - name: create logical volume 1
+ lvol:
+ vg: test_group
+ lv: data-lv1
+ size: 50%FREE
+ shrink: false
+
+ - name: create logical volume 2
+ lvol:
+ vg: test_group
+ lv: data-lv2
+ size: 100%FREE
+ shrink: false
+
+ - name: partition "{{ pv_devices[1] | default('/dev/sdc') }}" for journals (partition 1)
+ parted:
+ device: "{{ pv_devices[1] | default('/dev/sdc') }}"
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+ tags: partitions
+
+ - name: partition "{{ pv_devices[1] | default('/dev/sdc') }}" for journals (partition 2)
+ parted:
+ device: "{{ pv_devices[1] | default('/dev/sdc') }}"
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+ tags: partitions
+
+ - name: create journals vg from "{{ pv_devices[1] | default('/dev/sdc') }}2"
+ lvg:
+ vg: journals
+ pvs: "{{ pv_devices[1] | default('/dev/sdc') }}2"
+
+ - name: create journal1 lv
+ lvol:
+ vg: journals
+ lv: journal1
+ size: 100%FREE
+ shrink: false
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+all:
+ vars:
+ admin_secret: AQBSV4xaAAAAABAA3VUTiOZTHecau2SnAEVPYQ==
+ ceph_conf_overrides:
+ global: {osd_pool_default_pg_num: 8, osd_pool_default_pgp_num: 8, osd_pool_default_size: 1,
+ mon_allow_pool_size_one: true,
+ mon_warn_on_pool_no_redundancy: false,
+ rgw_keystone_accepted_roles: 'Member, admin', rgw_keystone_admin_domain: default,
+ rgw_keystone_admin_password: RtYPg7AUdsZCGv4Z4rF8FvnaR, rgw_keystone_admin_project: service,
+ rgw_keystone_admin_user: swift, rgw_keystone_api_version: 3, rgw_keystone_implicit_tenants: 'true',
+ rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
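+ # The rgw_keystone_* overrides above point radosgw at a Keystone endpoint so
+ # that S3/Swift requests are authenticated against OpenStack credentials.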
+ cluster: mycluster
+ ceph_docker_image: ceph-ci/daemon
+ ceph_docker_image_tag: latest-pacific
+ ceph_docker_registry: quay.ceph.io
+ cephfs_data_pool:
+ name: 'manila_data'
+ application: "cephfs"
+ cephfs_metadata_pool:
+ name: 'manila_metadata'
+ application: "cephfs"
+ cephfs_pools:
+ - "{{ cephfs_data_pool }}"
+ - "{{ cephfs_metadata_pool }}"
+ cluster_network: 192.168.96.0/24
+ containerized_deployment: true
+ devices: [/dev/sda, /dev/sdb, /dev/sdc]
+ docker: true
+ fsid: 6e008d48-1661-11e8-8546-008c3214218a
+ generate_fsid: false
+ ip_version: ipv4
+ ireallymeanit: 'yes'
+ keys:
+ - {key: AQAN0RdbAAAAABAA3CpSKRVDrENjkOSunEFZ0A==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r', name: client.openstack, osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics"}
+ - {key: AQAN0RdbAAAAABAAtV5Dq28z4H6XxwhaNEaFZg==, mds_cap: 'allow *', mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r, allow command "auth del", allow command "auth caps", allow command "auth get", allow command "auth get-or-create"', name: client.manila, osd_cap: 'allow rw'}
+ - {key: AQAN0RdbAAAAABAAH5D3WgMN9Rxw3M8jkpMIfg==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow rw', name: client.radosgw, osd_cap: 'allow rwx'}
+ monitor_address_block: 192.168.95.0/24
+ monitor_secret: AQBSV4xaAAAAABAALqm4vRHcITs4/041TwluMg==
+ ntp_service_enabled: false
+ openstack_config: true
+ openstack_keys:
+ - {key: AQAN0RdbAAAAABAA3CpSKRVDrENjkOSunEFZ0A==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r', name: client.openstack, osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics"}
+ - {key: AQAN0RdbAAAAABAAtV5Dq28z4H6XxwhaNEaFZg==, mds_cap: 'allow *', mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r, allow command "auth del", allow command "auth caps", allow command "auth get", allow command "auth get-or-create"', name: client.manila, osd_cap: 'allow rw'}
+ - {key: AQAN0RdbAAAAABAAH5D3WgMN9Rxw3M8jkpMIfg==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow rw', name: client.radosgw, osd_cap: 'allow rwx'}
+ openstack_pools:
+ - {name: images, pg_num: 8, rule_name: 'replicated_rule'}
+ - {name: metrics, pg_num: 8, rule_name: 'replicated_rule'}
+ - {name: backups, pg_num: 8, rule_name: 'replicated_rule'}
+ - {name: vms, pg_num: 8, rule_name: 'replicated_rule'}
+ - {name: volumes, pg_num: 8, rule_name: 'replicated_rule'}
+ pools: []
+ public_network: 192.168.95.0/24
+ radosgw_address_block: 192.168.95.0/24
+ radosgw_civetweb_port: '8080'
+ radosgw_keystone_ssl: false
+ user_config: true
+ dashboard_enabled: false
+clients:
+ hosts:
+ client0: {}
+ client1: {}
+ client2: {}
+mdss:
+ hosts:
+ mon0: {}
+mgrs:
+ hosts:
+ mon0: {}
+mons:
+ hosts:
+ mon0: {}
+ mon1: {}
+ mon2: {}
+nfss:
+ hosts: {}
+osds:
+ hosts:
+ osd0: {}
+ osd1: {}
+ osd2: {}
+rbdmirrors:
+ hosts: {}
+rgws:
+ hosts:
+ mon0: {}
+ osd0: {}
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 3
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.95
+cluster_subnet: 192.168.96
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+# client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.30.0/24"
+cluster_network: "192.168.31.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ rule_name: "HDD"
+ size: 1
+openstack_cinder_pool:
+ name: "volumes"
+ rule_name: "HDD"
+ size: 1
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
--- /dev/null
+---
+user_config: True
+copy_admin_key: True
+test:
+ name: "test"
+ rule_name: "HDD"
+test2:
+ name: "test2"
+ rule_name: "HDD"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
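+# generate_crt presumably tells the dashboard role to create a self-signed TLS
+# certificate for this test deployment.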
+generate_crt: True
--- /dev/null
+---
+create_crush_tree: True
+crush_rule_config: True
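+# The rule below targets hdd-class devices under the 'default' root with a
+# per-host failure domain; 'default: true' marks it as the rule applied to new
+# pools.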
+crush_rule_hdd:
+ name: HDD
+ root: default
+ type: host
+ class: hdd
+ default: true
+crush_rules:
+ - "{{ crush_rule_hdd }}"
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
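+# osd_crush_location pins each OSD into the CRUSH hierarchy (root/rack/pod/host)
+# built from the crush_rules/create_crush_tree settings.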
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+
+[mdss]
+mds0
+
+[rgws]
+rgw0
+
+#[nfss]
+#nfs0
+
+[clients]
+client0
+client1
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[monitoring]
+mon0
+
+#[all:vars]
+#ansible_python_interpreter=/usr/bin/python3
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 2
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 2
+iscsi_gw_vms: 1
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.30
+cluster_subnet: 192.168.31
+
+# MEMORY
+# set 1024 for CentOS
+memory: 2048
+
+vagrant_box: centos/atomic-host
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
--- /dev/null
+---
+- hosts: client0
+ gather_facts: false
+ become: yes
+ tasks:
+ - name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+ tags: always
+
+# all our containerized jobs are based on Atomic OS, so we can rely on is_atomic
+# to detect whether we are running a containerized job
+ - name: set_fact is_atomic
+ set_fact:
+ is_atomic: '{{ stat_ostree.stat.exists }}'
+ tags: always
+
+ - name: load rbd module
+ modprobe:
+ name: rbd
+ state: present
+ delegate_to: "{{ item }}"
+ with_items:
+ - mon0
+ - client0
+
+ - name: create an rbd image - non container
+ command: "rbd create --size=1024 test/rbd_test"
+ delegate_to: "mon0"
+ when:
+ - not is_atomic | bool
+ - not containerized_deployment | default(false) | bool
+
+ - name: create an rbd image - container
+ command: "podman run --rm -v /etc/ceph:/etc/ceph --net=host --entrypoint=rbd {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} create --size=1024 test/rbd_test"
+ delegate_to: "mon0"
+ when: is_atomic | bool or containerized_deployment | default(false) | bool
+
+ - name: non container
+ when:
+ - not is_atomic | bool
+ - not containerized_deployment | default(false) | bool
+ block:
+ - name: disable features unsupported by the kernel
+ command: rbd feature disable test/rbd_test object-map fast-diff deep-flatten
+
+ - name: map a device
+ command: rbd map test/rbd_test
+
+ - name: container
+ when: is_atomic | bool or containerized_deployment | default(false) | bool
+ block:
+ - name: disable features unsupported by the kernel
+ command: "podman run --rm -v /etc/ceph:/etc/ceph --net=host --entrypoint=rbd {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} feature disable test/rbd_test object-map fast-diff deep-flatten"
+
+ - name: map a device
+ command: "podman run --rm --privileged -v /etc/ceph:/etc/ceph -v /dev:/dev --net=host --entrypoint=rbd {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} map test/rbd_test"
--- /dev/null
+---
+- hosts: mon0
+ gather_facts: True
+ become: True
+ tasks:
+ - name: import_role ceph-defaults
+ import_role:
+ name: ceph-defaults
+
+ - name: import_role ceph-facts
+ include_role:
+ name: ceph-facts
+ tasks_from: "container_binary.yml"
+
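+ # Build an rbd command wrapper: run rbd through the ceph container image when
+ # containerized_deployment is true, otherwise use the host's rbd binary.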
+ - name: set_fact rbd_cmd
+ set_fact:
+ rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rbd' }}"
+
+ - name: create an image in rbd mirrored pool
+ command: "{{ rbd_cmd }} create foo --size 1024 --pool {{ ceph_rbd_mirror_pool }} --image-feature exclusive-lock,journaling"
+ changed_when: false
+ tags: primary
+
+ - name: check the image is replicated
+ command: "{{ rbd_cmd }} --pool {{ ceph_rbd_mirror_pool }} ls --format json"
+ register: rbd_ls
+ changed_when: false
+ tags: secondary
+ retries: 30
+ delay: 1
+ until: "'foo' in (rbd_ls.stdout | default('{}') | from_json)"
+
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+docker: True
+containerized_deployment: true
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.144.0/24"
+cluster_network: "192.168.145.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-main
--- /dev/null
+[mons]
+mon0
+
+[mgrs]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+osd0
--- /dev/null
+../../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+docker: True
+containerized_deployment: true
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.146.0/24"
+cluster_network: "192.168.147.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-main
--- /dev/null
+[mons]
+mon0
+
+[mgrs]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+osd0
+
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.146
+cluster_subnet: 192.168.147
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.144
+cluster_subnet: 192.168.145
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.140.0/24"
+cluster_network: "192.168.141.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
--- /dev/null
+[mons]
+mon0
+
+[mgrs]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+osd0
+
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.142.0/24"
+cluster_network: "192.168.143.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
--- /dev/null
+[mons]
+mon0
+
+[mgrs]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+osd0
+
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.142
+cluster_subnet: 192.168.143
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/stream8
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.140
+cluster_subnet: 192.168.141
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/stream8
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+- hosts: all
+ gather_facts: true
+ tasks:
+ - name: reboot the machines
+ reboot:
+ reboot_timeout: 180
+ test_command: uptime
+ become: yes
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+docker: True
+containerized_deployment: true
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.105.0/24"
+cluster_network: "192.168.106.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
--- /dev/null
+---
+copy_admin_key: true
+# Enable Multisite support
+rgw_multisite: true
+rgw_multisite_proto: http
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+---
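+# This host runs two radosgw instances: the master zone for realm 'canada'
+# (montreal-00) and a secondary zone for realm 'france' (montreal-01) that
+# syncs from the endpoint on the other cluster.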
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_zonemaster: True
+ rgw_zonesecondary: False
+ rgw_zonegroupmaster: True
+ rgw_realm: 'canada'
+ rgw_zonegroup: 'zonegroup-canada'
+ rgw_zone: montreal-00
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+ - instance_name: 'rgw1'
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: false
+ rgw_realm: 'france'
+ rgw_zonegroup: 'zonegroup-france'
+ rgw_zone: montreal-01
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+ endpoint: http://192.168.107.12:8081
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.105.12
+radosgw_num_instances: 2
--- /dev/null
+---
+rgw_zonemaster: true
+rgw_zonesecondary: false
+rgw_zonegroupmaster: true
+rgw_multisite_proto: http
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_realm: 'foo'
+ rgw_zonegroup: 'zonegroup123'
+ rgw_zone: 'gotham_city'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: batman
+ rgw_zone_user_display_name: "Batman"
+ system_access_key: 9WA1GN33IUYC717S8KB2
+ system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+ - instance_name: 'rgw1'
+ rgw_realm: 'bar'
+ rgw_zonegroup: 'zonegroup456'
+ rgw_zone: 'metropolis'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: superman
+ rgw_zone_user_display_name: "Superman"
+ system_access_key: S96CJL44E29AN91Y3ZC5
+ system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.105.11
+radosgw_num_instances: 2
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+osd0
+rgw0
\ No newline at end of file
--- /dev/null
+../../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+docker: True
+containerized_deployment: true
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.107.0/24"
+cluster_network: "192.168.108.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
--- /dev/null
+---
+# Enable Multisite support
+rgw_multisite: true
+rgw_multisite_proto: http
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+---
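+# Secondary side of the same realms: a secondary zone for realm 'canada'
+# (paris-00, syncing from 192.168.105.12) and the master zone for realm
+# 'france' (paris-01).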
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: false
+ rgw_realm: 'canada'
+ rgw_zonegroup: 'zonegroup-canada'
+ rgw_zone: paris-00
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+ endpoint: http://192.168.105.12:8080
+ - instance_name: 'rgw1'
+ rgw_zonemaster: True
+ rgw_zonesecondary: False
+ rgw_zonegroupmaster: True
+ rgw_realm: 'france'
+ rgw_zonegroup: 'zonegroup-france'
+ rgw_zone: paris-01
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.107.12
+radosgw_num_instances: 2
--- /dev/null
+---
+rgw_zonemaster: false
+rgw_zonesecondary: true
+rgw_zonegroupmaster: false
+rgw_multisite_proto: http
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_realm: 'foo'
+ rgw_zonegroup: 'zonegroup123'
+ rgw_zone: 'gotham_city-secondary'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: batman
+ rgw_zone_user_display_name: "Batman"
+ system_access_key: 9WA1GN33IUYC717S8KB2
+ system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+ endpoint: http://192.168.105.11:8080
+ - instance_name: 'rgw1'
+ rgw_realm: 'bar'
+ rgw_zonegroup: 'zonegroup456'
+ rgw_zone: 'metropolis-secondary'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: superman
+ rgw_zone_user_display_name: "Superman"
+ system_access_key: S96CJL44E29AN91Y3ZC5
+ system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+ endpoint: http://192.168.105.11:8081
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.107.11
+radosgw_num_instances: 2
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+osd0
+rgw0
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.107
+cluster_subnet: 192.168.108
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.105
+cluster_subnet: 192.168.106
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.101.0/24"
+cluster_network: "192.168.102.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
--- /dev/null
+---
+copy_admin_key: true
+# Enable Multisite support
+rgw_multisite: true
+rgw_multisite_proto: http
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+---
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_zonemaster: True
+ rgw_zonesecondary: False
+ rgw_zonegroupmaster: True
+ rgw_realm: 'canada'
+ rgw_zonegroup: 'zonegroup-canada'
+ rgw_zone: montreal-00
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+ - instance_name: 'rgw1'
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: false
+ rgw_realm: 'france'
+ rgw_zonegroup: 'zonegroup-france'
+ rgw_zone: montreal-01
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+ endpoint: http://192.168.103.12:8081
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.101.12
+radosgw_num_instances: 2
--- /dev/null
+rgw_zonemaster: true
+rgw_zonesecondary: false
+rgw_zonegroupmaster: true
+rgw_multisite_proto: http
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_realm: 'foo'
+ rgw_zonegroup: 'zonegroup123'
+ rgw_zone: 'gotham_city'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: batman
+ rgw_zone_user_display_name: "Batman"
+ system_access_key: 9WA1GN33IUYC717S8KB2
+ system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+ - instance_name: 'rgw1'
+ rgw_realm: 'bar'
+ rgw_zonegroup: 'zonegroup456'
+ rgw_zone: 'metropolis'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: superman
+ rgw_zone_user_display_name: "Superman"
+ system_access_key: S96CJL44E29AN91Y3ZC5
+ system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.101.11
+radosgw_num_instances: 2
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+osd0
+rgw0
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+cluster: ceph
+public_network: "192.168.103.0/24"
+cluster_network: "192.168.104.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 512
+dashboard_enabled: False
--- /dev/null
+---
+# Enable Multisite support
+rgw_multisite: true
+rgw_multisite_proto: http
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+---
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_zonemaster: false
+ rgw_zonesecondary: true
+ rgw_zonegroupmaster: false
+ rgw_realm: 'canada'
+ rgw_zonegroup: 'zonegroup-canada'
+ rgw_zone: paris-00
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: jacques.chirac
+ rgw_zone_user_display_name: "Jacques Chirac"
+ system_access_key: P9Eb6S8XNyo4dtZZUUMy
+ system_secret_key: qqHCUtfdNnpHq3PZRHW5un9l0bEBM812Uhow0XfB
+ endpoint: http://192.168.101.12:8080
+ - instance_name: 'rgw1'
+ rgw_zonemaster: True
+ rgw_zonesecondary: False
+ rgw_zonegroupmaster: True
+ rgw_realm: 'france'
+ rgw_zonegroup: 'zonegroup-france'
+ rgw_zone: paris-01
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: edward.lewis
+ rgw_zone_user_display_name: "Edward Lewis"
+ system_access_key: yu17wkvAx3B8Wyn08XoF
+ system_secret_key: 5YZfaSUPqxSNIkZQQA3lBZ495hnIV6k2HAz710BY
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.103.12
+radosgw_num_instances: 2
--- /dev/null
+---
+rgw_zonemaster: false
+rgw_zonesecondary: true
+rgw_zonegroupmaster: false
+rgw_multisite_proto: http
+rgw_instances:
+ - instance_name: 'rgw0'
+ rgw_realm: 'foo'
+ rgw_zonegroup: 'zonegroup123'
+ rgw_zone: 'gotham_city-secondary'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8080
+ rgw_zone_user: batman
+ rgw_zone_user_display_name: "Batman"
+ system_access_key: 9WA1GN33IUYC717S8KB2
+ system_secret_key: R2vWXyboYw9nluehMgtATBGDBZSuWLnR0M4xNa1W
+ endpoint: http://192.168.101.11:8080
+ - instance_name: 'rgw1'
+ rgw_realm: 'bar'
+ rgw_zonegroup: 'zonegroup456'
+ rgw_zone: 'metropolis-secondary'
+ radosgw_address: "{{ _radosgw_address }}"
+ radosgw_frontend_port: 8081
+ rgw_zone_user: superman
+ rgw_zone_user_display_name: "Superman"
+ system_access_key: S96CJL44E29AN91Y3ZC5
+ system_secret_key: ha7yWiIi7bSV2vAqMBfKjYIVKMfOBaGkWrUZifRt
+ endpoint: http://192.168.101.11:8081
+# functional testing
+rgw_multisite_endpoint_addr: 192.168.103.11
+radosgw_num_instances: 2
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+osd0
+rgw0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.103
+cluster_subnet: 192.168.104
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.101
+cluster_subnet: 192.168.102
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
--- /dev/null
+---
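+# Functional test for RGW multisite: on master-zone instances, create a
+# bucket and upload a 10MB object with s3cmd; on secondary-zone instances,
+# poll `s3cmd info` until the object has replicated.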
+- hosts: rgws
+ gather_facts: True
+ become: True
+ tasks:
+ - name: import_role ceph-defaults
+ import_role:
+ name: ceph-defaults
+
+ - name: include_role ceph-facts
+ include_role:
+ name: ceph-facts
+ tasks_from: "{{ item }}.yml"
+ with_items:
+ - set_radosgw_address
+ - container_binary
+
+ - name: install s3cmd
+ package:
+ name: s3cmd
+ state: present
+ register: result
+ until: result is succeeded
+ when: not containerized_deployment | bool
+
+ - name: generate and upload a random 10MB file - containerized deployment
+ shell: >
+ {{ container_binary }} run --rm --name=rgw_multisite_test --entrypoint=bash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -c 'dd if=/dev/urandom of=/tmp/testinfra-{{ item.rgw_realm }}.img bs=1M count=10;
+ s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} mb s3://testinfra-{{ item.rgw_realm }};
+ s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} put /tmp/testinfra-{{ item.rgw_realm }}.img s3://testinfra-{{ item.rgw_realm }}'
+ with_items: "{{ rgw_instances_host }}"
+ tags: upload
+ when:
+ - item.rgw_zonemaster | default(rgw_zonemaster) | bool
+ - containerized_deployment | bool
+
+ - name: generate and upload a random 10MB file - non containerized
+ shell: |
+ dd if=/dev/urandom of=/tmp/testinfra-{{ item.rgw_realm }}.img bs=1M count=10;
+ s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} mb s3://testinfra-{{ item.rgw_realm }};
+ s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} put /tmp/testinfra-{{ item.rgw_realm }}.img s3://testinfra-{{ item.rgw_realm }};
+ with_items: "{{ rgw_instances_host }}"
+ tags: upload
+ when:
+ - item.rgw_zonemaster | default(rgw_zonemaster) | bool
+ - not containerized_deployment | bool
+
+ - name: get info from replicated file - containerized deployment
+ command: >
+ {{ container_binary }} run --rm --name=rgw_multisite_test --entrypoint=s3cmd {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} info s3://testinfra-{{ item.rgw_realm }}/testinfra-{{ item.rgw_realm }}.img
+ with_items: "{{ rgw_instances_host }}"
+ register: result
+ retries: 60
+ delay: 1
+ until: result is succeeded
+ tags: download
+ when:
+ - not item.rgw_zonemaster | default(rgw_zonemaster) | bool
+ - containerized_deployment | bool
+
+ - name: get info from replicated file - non containerized
+ command: >
+ s3cmd --no-ssl --access_key={{ item.system_access_key }} --secret_key={{ item.system_secret_key }} --host={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} --host-bucket={{ item.radosgw_address }}:{{ item.radosgw_frontend_port }} info s3://testinfra-{{ item.rgw_realm }}/testinfra-{{ item.rgw_realm }}.img
+ with_items: "{{ rgw_instances_host }}"
+ register: result
+ retries: 60
+ delay: 1
+ until: result is succeeded
+ tags: download
+ when:
+ - not item.rgw_zonemaster | default(rgw_zonemaster) | bool
+ - not containerized_deployment | bool
--- /dev/null
+---
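+# RHCS test setup: rewrite the scenario's vagrant_variables.yml to use the
+# rhel7 box, switch ceph_origin/ceph_repository to distro/rhcs, then install
+# the nightly RHEL 7 and RHCS (MON/OSD/Tools) repositories, adjust MTU and
+# configure docker on the test VMs.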
+- hosts: localhost
+ gather_facts: false
+ become: yes
+ tags: vagrant_setup
+ tasks:
+
+ - name: change centos/7 vagrant box name to rhel7
+ replace:
+ regexp: "centos/7"
+ replace: "rhel7"
+ dest: "{{ change_dir }}/vagrant_variables.yml"
+ when: change_dir is defined
+
+ - name: change ceph/ubuntu-xenial vagrant box name to rhel7
+ replace:
+ regexp: "ceph/ubuntu-xenial"
+ replace: "rhel7"
+ dest: "{{ change_dir }}/vagrant_variables.yml"
+ when: change_dir is defined
+
+ - name: change centos/atomic-host vagrant box name to rhel7
+ replace:
+ regexp: "centos/atomic-host"
+ replace: "rhel7"
+ dest: "{{ change_dir }}/vagrant_variables.yml"
+ when: change_dir is defined
+
+ - name: change ceph_origin to distro
+ replace:
+ regexp: "ceph_origin:.*"
+ replace: "ceph_origin: distro"
+ dest: "{{ change_dir }}/group_vars/all"
+ when: change_dir is defined
+
+ - name: change ceph_repository to rhcs
+ replace:
+ regexp: "ceph_repository:.*"
+ replace: "ceph_repository: rhcs"
+ dest: "{{ change_dir }}/group_vars/all"
+ when: change_dir is defined
+
+ - name: print contents of {{ change_dir }}/group_vars/all
+ command: "cat {{ change_dir }}/group_vars/all"
+ when: change_dir is defined
+
+- hosts: all
+ gather_facts: true
+ become: yes
+ tasks:
+
+ - name: check if it is Atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+ check_mode: no
+
+ - name: set fact for using Atomic host
+ set_fact:
+ is_atomic: '{{ stat_ostree.stat.exists }}'
+
+ - name: install nightly rhel7 repo
+ get_url:
+ url: "{{ rhel7_repo_url }}"
+ dest: /etc/yum.repos.d
+ owner: root
+ group: root
+ when: not is_atomic | bool
+
+ - name: enable the rhel-7-extras-nightly repo
+ command: "yum-config-manager --enable rhel-7-extras-nightly"
+
+ - name: set MTU on eth0
+ command: "ifconfig eth0 mtu 1400 up"
+
+ - name: set MTU on eth1
+ command: "ifconfig eth1 mtu 1400 up"
+
+ - name: install docker
+ package:
+ name: docker
+ state: present
+ register: result
+ until: result is succeeded
+ when: ansible_facts['os_family'] == 'RedHat'
+
+ - name: allow insecure docker registries
+ lineinfile:
+ line: 'INSECURE_REGISTRY="--insecure-registry {{ ceph_docker_registry }}"'
+ dest: "/etc/sysconfig/docker"
+
+ - name: restart docker
+ service:
+ name: docker
+ state: restarted
+
+- hosts: mons:mgrs
+ gather_facts: false
+ become: yes
+ tasks:
+
+ - name: install ceph mon repo
+ yum_repository:
+ name: ceph-mon
+ description: repo for rhcs ceph-mon
+ baseurl: "{{ repo_url }}/MON/x86_64/os/"
+ gpgcheck: no
+ enabled: yes
+ when: not is_atomic | bool
+
+- hosts: osds
+ gather_facts: false
+ become: yes
+ tasks:
+
+ - name: install ceph osd repo
+ yum_repository:
+ name: ceph-osd
+ description: repo for rhcs ceph-osd
+ baseurl: "{{ repo_url }}/OSD/x86_64/os/"
+ gpgcheck: no
+ enabled: yes
+ when: not is_atomic | bool
+
+ - name: set MTU on eth2
+ command: "ifconfig eth2 mtu 1400 up"
+
+- hosts: mdss:rgws:clients
+ gather_facts: false
+ become: yes
+ tasks:
+
+ - name: install ceph tools repo
+ yum_repository:
+ name: ceph-tools
+ description: repo for rhcs ceph tools
+ baseurl: "{{ repo_url }}/Tools/x86_64/os/"
+ gpgcheck: no
+ enabled: yes
+ when: not is_atomic | bool
--- /dev/null
+---
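+# Generic test VM preparation: detect Atomic hosts, update packages, remount
+# / with noatime/nodiratime, install net-tools for the testinfra Socket
+# module, point CentOS 7 at the local EPEL mirror, and grow the root LV on
+# Atomic hosts.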
+- hosts: all
+ gather_facts: true
+ become: yes
+ tasks:
+
+ - name: check if it is Atomic host
+ stat: path=/run/ostree-booted
+ register: stat_ostree
+ check_mode: no
+
+ - name: set fact for using Atomic host
+ set_fact:
+ is_atomic: '{{ stat_ostree.stat.exists }}'
+
+ - name: update the system
+ command: dnf update -y
+ changed_when: false
+ when: not is_atomic | bool
+
+ - name: get root mount information
+ set_fact:
+ rootmount: "{{ ansible_facts['mounts']|json_query('[?mount==`/`]|[0]') }}"
+
+ # mount -o remount doesn't work on RHEL 8 for now
+ - name: add mount options to /
+ mount:
+ path: '{{ rootmount.mount }}'
+ src: '{{ rootmount.device }}'
+ opts: "noatime,nodiratime{% if ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] | int < 8 %},nobarrier{% endif %}"
+ fstype: '{{ rootmount.fstype }}'
+ state: mounted
+
+ # we need to install this so the Socket testinfra module
+ # can use netcat for testing
+ - name: install net-tools
+ package:
+ name: net-tools
+ state: present
+ register: result
+ until: result is succeeded
+ when: not is_atomic | bool
+
+ - name: centos based systems - configure repos
+ block:
+ - name: disable fastest mirror detection
+ ini_file:
+ path: /etc/yum/pluginconf.d/fastestmirror.conf
+ section: main
+ option: enabled
+ value: 0
+ - name: install epel
+ package:
+ name: epel-release
+ state: present
+ register: result
+ until: result is succeeded
+ - name: enable local epel repository
+ ini_file:
+ path: /etc/yum.repos.d/epel.repo
+ section: epel
+ option: baseurl
+ value: http://apt-mirror.front.sepia.ceph.com/epel7/
+ - name: disable remote epel repository
+ ini_file:
+ path: /etc/yum.repos.d/epel.repo
+ section: epel
+ option: metalink
+ state: absent
+ when:
+ - ansible_facts['distribution'] == 'CentOS'
+ - ansible_facts['distribution_major_version'] | int == 7
+ - not is_atomic | bool
+
+ - name: resize logical volume for root partition to fill remaining free space
+ lvol:
+ lv: root
+ vg: atomicos
+ size: +100%FREE
+ resizefs: yes
+ when: is_atomic | bool
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.79.0/24"
+cluster_network: "192.168.80.0/24"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: False
+dashboard_enabled: False
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mdss]
+mds0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 1
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.79
+cluster_subnet: 192.168.80
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.77.0/24"
+cluster_network: "192.168.78.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+copy_admin_key: true
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mdss]
+mds0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 1
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.77
+cluster_subnet: 192.168.78
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For Xenial use disks: [ '/dev/sdb', '/dev/sdc' ]
+# For CentOS7 use disks: [ '/dev/sda', '/dev/sdb' ]
+disks: [ '/dev/sdb', '/dev/sdc' ]
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial or bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# openSUSE: opensuse/openSUSE-42.3-x86_64
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+vagrant_sync_dir: /home/vagrant/sync
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# Debug mode, runs Ansible with -vvvv
+debug: false
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.83.0/24"
+cluster_network: "192.168.84.0/24"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: False
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mgr0
+mgr1
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 2
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.83
+cluster_subnet: 192.168.84
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: dev
+public_network: "192.168.81.0/24"
+cluster_network: "192.168.82.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mgr0
+mgr1
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 2
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.81
+cluster_subnet: 192.168.82
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name, needs to match the hostname
+# label_prefix: ceph
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.17.0/24"
+cluster_network: "192.168.18.0/24"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: False
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.17
+cluster_subnet: 192.168.18
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0 monitor_address=192.168.1.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.1.12
+
+[osds]
+osd0
\ No newline at end of file
--- /dev/null
+[all:vars]
+docker=True
+
+[mons]
+mon0 monitor_address=192.168.1.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.1.12
+
+[osds]
+osd0
+
+[mdss]
+mds0
+
+[rgws]
+rgw0
+
+[clients]
+client0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name, needs to match the hostname
+# label_prefix: ceph
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.73.0/24"
+cluster_network: "192.168.74.0/24"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: False
+dashboard_enabled: False
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+---
+journal_size: 100
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
+osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
+osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 4
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.73
+cluster_subnet: 192.168.74
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.71.0/24"
+cluster_network: "192.168.72.0/24"
+ceph_conf_overrides:
+ global:
+ osd_pool_default_size: 3
+openstack_config: False
+dashboard_enabled: False
+copy_admin_key: True
\ No newline at end of file
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+journal_size: 100
--- /dev/null
+[mons]
+mon0 monitor_address=192.168.71.10
+
+[osds]
+osd0 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]"
+osd1 osd_objectstore=filestore lvm_volumes="[{'data': 'data-lv1', 'journal': '/dev/sdc1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'journal': 'journal1', 'journal_vg': 'journals'}]" dmcrypt=true
+osd2 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group', 'db': 'journal1', 'db_vg': 'journals'}]"
+osd3 osd_objectstore=bluestore lvm_volumes="[{'data': 'data-lv1', 'data_vg': 'test_group'},{'data': 'data-lv2', 'data_vg': 'test_group'}]" dmcrypt=true
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 4
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.71
+cluster_subnet: 192.168.72
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name, needs to match the hostname
+# label_prefix: ceph
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+public_network: "192.168.87.0/24"
+cluster_network: "192.168.88.0/24"
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: False
+dashboard_enabled: False
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+rbd-mirror0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.87
+cluster_subnet: 192.168.88
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.85.0/24"
+cluster_network: "192.168.86.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+copy_admin_key: true
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rbdmirrors]
+rbd-mirror0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.85
+cluster_subnet: 192.168.86
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name; needs to match the hostname
+# label_prefix: ceph
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../all_daemons/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+public_network: "192.168.91.0/24"
+cluster_network: "192.168.92.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+openstack_config: False
+dashboard_enabled: False
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: true
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+rgw0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.91
+cluster_subnet: 192.168.92
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: dev
+public_network: "192.168.89.0/24"
+cluster_network: "192.168.90.0/24"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+osd_objectstore: "bluestore"
+copy_admin_key: true
+ceph_conf_overrides:
+ global:
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+dashboard_enabled: False
--- /dev/null
+---
+create_crush_tree: False
+crush_rule_config: False
\ No newline at end of file
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: true
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[rgws]
+rgw0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.89
+cluster_subnet: 192.168.90
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name; needs to match the hostname
+# label_prefix: ceph
--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+{
+ "ceph_conf_overrides": {
+ "global": {
+ "auth_allow_insecure_global_id_reclaim": false,
+ "osd_pool_default_pg_num": 12,
+ "osd_pool_default_size": 1,
+ "mon_allow_pool_size_one": true,
+ "mon_warn_on_pool_no_redundancy": false,
+ "mon_max_pg_per_osd": 300
+ }
+  },
+ "ceph_mon_docker_memory_limit": "2g",
+ "radosgw_num_instances": 2
+}
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+public_network: "192.168.5.0/24"
+cluster_network: "192.168.6.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ auth_allow_insecure_global_id_reclaim: false
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+openstack_config: false
+docker_pull_timeout: 600s
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+# TODO: add monitoring later
+dashboard_enabled: false
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-pacific
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
--- /dev/null
+---
+user_config: True
+copy_admin_key: True
+test:
+ name: "test"
+ rule_name: "HDD"
+ size: 1
+test2:
+ name: "test2"
+ size: 1
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+generate_crt: True
--- /dev/null
+---
+create_crush_tree: false
+crush_rule_config: false
--- /dev/null
+---
+osd_objectstore: "bluestore"
+devices:
+ - /dev/sda
+ - /dev/sdb
+ - /dev/sdc
--- /dev/null
+---
+copy_admin_key: True
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
--- /dev/null
+[mons]
+mon0 monitor_address=192.168.5.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.5.12
+
+[mgrs]
+mon0
+mon1
+
+[osds]
+osd0
+osd1
+osd2
+
+[rgws]
+rgw0
+rgw1
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 2
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.5
+cluster_subnet: 192.168.6
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+---
+ceph_origin: repository
+ceph_repository: community
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_conf_overrides:
+ global:
+ auth_allow_insecure_global_id_reclaim: false
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+ mon_max_pg_per_osd: 300
+openstack_config: false
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+# TODO: add monitoring later
+dashboard_enabled: false
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+node_exporter_container_image: "quay.ceph.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.ceph.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.ceph.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.ceph.io/app-sre/grafana:6.7.4"
+grafana_server_group_name: ceph_monitoring
--- /dev/null
+---
+copy_admin_key: True
+user_config: True
+test:
+ name: "test"
+ rule_name: "HDD"
+ size: 1
+test2:
+ name: "test2"
+ size: 1
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+generate_crt: True
--- /dev/null
+---
+create_crush_tree: false
+crush_rule_config: false
--- /dev/null
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+ CACHEINODE {
+ Entries_HWMark = 100000;
+ }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_master"
--- /dev/null
+---
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+osd_objectstore: "bluestore"
+devices:
+ - /dev/sda
+ - /dev/sdb
+ - /dev/sdc
--- /dev/null
+copy_admin_key: true
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ type: replicated
+ bar:
+ pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
--- /dev/null
+[mons]
+mon0 monitor_address=192.168.3.10
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address=192.168.3.12
+
+[mgrs]
+mon0
+mon1
+
+[osds]
+osd0
+osd1
+osd2
+
+[rgws]
+rgw0
+rgw1
+
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 2
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name; needs to match the hostname
+# label_prefix: ceph
--- /dev/null
+import pytest
+
+
+class TestGrafanas(object):
+
+ @pytest.mark.dashboard
+ @pytest.mark.no_docker
+ def test_grafana_dashboard_is_installed(self, node, host):
+ assert host.package("ceph-grafana-dashboards").is_installed
+
+ @pytest.mark.dashboard
+ @pytest.mark.parametrize('svc', [
+ 'alertmanager', 'grafana-server', 'prometheus'
+ ])
+ def test_grafana_service_enabled_and_running(self, node, host, svc):
+ s = host.service(svc)
+ assert s.is_enabled
+ assert s.is_running
+
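+    # Ports the dashboard stack is expected to listen on: Grafana's web UI
+    # plus the Prometheus and Alertmanager ports configured by ceph-ansible.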
+ @pytest.mark.dashboard
+ @pytest.mark.parametrize('port', [
+ '3000', '9092', '9093'
+ ])
+ def test_grafana_socket(self, node, host, setup, port):
+ s = host.socket('tcp://%s:%s' % (setup["address"], port))
+ assert s.is_listening
--- /dev/null
+import pytest
+
+
+class TestiSCSIs(object):
+
+ @pytest.mark.no_docker
+ @pytest.mark.parametrize('pkg', [
+ 'ceph-iscsi',
+ 'targetcli',
+ 'tcmu-runner'
+ ])
+ def test_iscsi_package_is_installed(self, node, host, pkg):
+ assert host.package(pkg).is_installed
+
+ @pytest.mark.parametrize('svc', [
+ 'rbd-target-api',
+ 'rbd-target-gw',
+ 'tcmu-runner'
+ ])
+ def test_iscsi_service_enabled_and_running(self, node, host, svc):
+ s = host.service(svc)
+ assert s.is_enabled
+ assert s.is_running
--- /dev/null
+import pytest
+import json
+
+
+class TestMDSs(object):
+
+ @pytest.mark.no_docker
+ def test_mds_is_installed(self, node, host):
+ assert host.package("ceph-mds").is_installed
+
+ def test_mds_service_enabled_and_running(self, node, host):
+ service_name = "ceph-mds@{hostname}".format(
+ hostname=node["vars"]["inventory_hostname"]
+ )
+ s = host.service(service_name)
+ assert s.is_enabled
+ assert s.is_running
+
+ def test_mds_is_up(self, node, host, setup):
+ hostname = node["vars"]["inventory_hostname"]
+ container_binary = setup['container_binary']
+ if node["docker"]:
+ container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format( # noqa E501
+ hostname=hostname, container_binary=container_binary)
+ else:
+ container_exec_cmd = ''
+
+ cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
+ container_exec_cmd=container_exec_cmd,
+ cluster=setup['cluster_name']
+ )
+ cluster_status = json.loads(host.check_output(cmd))
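+        # Active plus standby MDS daemons reported by the fsmap should match
+        # the number of hosts in the [mdss] inventory group.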
+ assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get( # noqa E501
+ 'up:standby', 0)) == len(node["vars"]["groups"]["mdss"])
--- /dev/null
+import pytest
+import json
+
+
+class TestMGRs(object):
+
+ @pytest.mark.no_docker
+ def test_mgr_is_installed(self, node, host):
+ assert host.package("ceph-mgr").is_installed
+
+ @pytest.mark.dashboard
+ @pytest.mark.no_docker
+ def test_mgr_dashboard_is_installed(self, node, host):
+ assert host.package("ceph-mgr-dashboard").is_installed
+
+ def test_mgr_service_is_enabled_and_running(self, node, host):
+ service_name = "ceph-mgr@{hostname}".format(
+ hostname=node["vars"]["inventory_hostname"]
+ )
+ s = host.service(service_name)
+ assert s.is_enabled
+ assert s.is_running
+
+ @pytest.mark.dashboard
+ @pytest.mark.parametrize('port', [
+ '8443', '9283'
+ ])
+ def test_mgr_dashboard_is_listening(self, node, host, setup, port):
+ s = host.socket('tcp://%s:%s' % (setup["address"], port))
+ assert s.is_listening
+
+ def test_mgr_is_up(self, node, host, setup):
+ hostname = node["vars"]["inventory_hostname"]
+ cluster = setup["cluster_name"]
+ container_binary = setup["container_binary"]
+ if node['docker']:
+ container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format( # noqa E501
+ hostname=hostname, container_binary=container_binary)
+ else:
+ container_exec_cmd = ''
+ cmd = "sudo {container_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
+ container_exec_cmd=container_exec_cmd,
+ hostname=node["vars"]["inventory_hostname"],
+ cluster=cluster
+ )
+ output_raw = host.check_output(cmd)
+ output_json = json.loads(output_raw)
+
+ assert output_json['mgrmap']['available']
--- /dev/null
+import pytest
+import re
+
+
+class TestMons(object):
+
+ @pytest.mark.no_docker
+ def test_ceph_mon_package_is_installed(self, node, host):
+ assert host.package("ceph-mon").is_installed
+
+ @pytest.mark.parametrize("mon_port", [3300, 6789])
+ def test_mon_listens(self, node, host, setup, mon_port):
+ assert host.socket("tcp://{address}:{port}".format(
+ address=setup["address"],
+ port=mon_port
+ )).is_listening
+
+ def test_mon_service_enabled_and_running(self, node, host):
+ service_name = "ceph-mon@{hostname}".format(
+ hostname=node["vars"]["inventory_hostname"]
+ )
+ s = host.service(service_name)
+ assert s.is_enabled
+ assert s.is_running
+
+ @pytest.mark.no_docker
+ def test_can_get_cluster_health(self, node, host, setup):
+ cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(setup["cluster_name"]) # noqa E501
+ output = host.check_output(cmd)
+ assert output.strip().startswith("cluster")
+
+    def test_ceph_config_has_initial_members_line(self, node, host, setup):
+ assert host.file(setup["conf_path"]).contains("^mon initial members = .*$")
+
+ def test_initial_members_line_has_correct_value(self, node, host, setup):
+ mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name'])) # noqa E501
+ result = True
+        for mon_host in node["vars"]["groups"]["mons"]:
+            pattern = re.compile(mon_host)
+            if pattern.search(mon_initial_members_line) is None:
+                result = False
+ assert result
--- /dev/null
+import json
+import pytest
+
+
+class TestNFSs(object):
+
+ @pytest.mark.no_rolling_update
+ @pytest.mark.no_docker
+ @pytest.mark.parametrize('pkg', [
+ 'nfs-ganesha',
+ 'nfs-ganesha-rgw'
+ ])
+ def test_nfs_ganesha_package_is_installed(self, node, host, pkg):
+ assert host.package(pkg).is_installed
+
+ @pytest.mark.no_docker
+ def test_nfs_service_enabled_and_running(self, node, host):
+ s = host.service("nfs-ganesha")
+ assert s.is_enabled
+ assert s.is_running
+
+ @pytest.mark.no_docker
+ def test_nfs_config_override(self, node, host):
+ assert host.file(
+ "/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
+
+ @pytest.mark.no_rolling_update
+ def test_nfs_is_up(self, node, host, setup):
+ hostname = node["vars"]["inventory_hostname"]
+ cluster = setup['cluster_name']
+ container_binary = setup["container_binary"]
+ if node['docker']:
+ container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format( # noqa E501
+ hostname=hostname, container_binary=container_binary)
+ else:
+ container_exec_cmd = ''
+ cmd = "sudo {container_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
+ container_exec_cmd=container_exec_cmd,
+ hostname=hostname,
+ cluster=cluster
+ )
+ output = host.check_output(cmd)
+ keys = [i for i in json.loads(
+ output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys()]
+ keys.remove('summary')
+ daemons = json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"]
+ hostnames = []
+ for key in keys:
+            hostnames.append(daemons[key]['metadata']['hostname'])
+        assert hostname in hostnames
+
+
+# NOTE (guits): This check must be fixed. (Permission denied error)
+# @pytest.mark.no_docker
+# def test_nfs_rgw_fsal_export(self, node, host):
+# if(host.mount_point("/mnt").exists):
+# cmd = host.run("sudo umount /mnt")
+# assert cmd.rc == 0
+# cmd = host.run("sudo mount.nfs localhost:/ceph /mnt/")
+# assert cmd.rc == 0
+# assert host.mount_point("/mnt").exists
--- /dev/null
+import pytest
+
+
+class TestNodeExporter(object):
+
+ @pytest.mark.dashboard
+ def test_node_exporter_service_enabled_and_running(self, node, host):
+ s = host.service("node_exporter")
+ assert s.is_enabled
+ assert s.is_running
+
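+    # node_exporter listens on its default port 9100 on every address.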
+ @pytest.mark.dashboard
+ def test_node_exporter_socket(self, node, host):
+ assert host.socket('tcp://9100').is_listening
--- /dev/null
+import pytest
+import json
+
+
+class TestOSDs(object):
+
+ @pytest.mark.no_docker
+ def test_ceph_osd_package_is_installed(self, node, host):
+ assert host.package("ceph-osd").is_installed
+
+ def test_osds_listen_on_public_network(self, node, host, setup):
+        # TODO: figure out a way to parametrize this test
+ nb_port = (setup["num_osds"] * 4)
+ assert host.check_output(
+ "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (setup["address"])) == str(nb_port) # noqa E501
+
+ def test_osds_listen_on_cluster_network(self, node, host, setup):
+        # TODO: figure out a way to parametrize this test
+ nb_port = (setup["num_osds"] * 4)
+ assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % # noqa E501
+ (setup["cluster_address"])) == str(nb_port)
+
+ def test_osd_service_enabled_and_running(self, node, host, setup):
+        # TODO: figure out a way to parametrize node['osds'] for this test
+ for osd in setup["osds"]:
+ s = host.service("ceph-osd@%s" % osd)
+ assert s.is_enabled
+ assert s.is_running
+
+ @pytest.mark.no_docker
+ def test_osd_are_mounted(self, node, host, setup):
+        # TODO: figure out a way to parametrize setup['osd_ids'] for this test
+ for osd_id in setup["osd_ids"]:
+ osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
+ cluster=setup["cluster_name"],
+ osd_id=osd_id,
+ )
+ assert host.mount_point(osd_path).exists
+
+ @pytest.mark.no_docker
+ @pytest.mark.parametrize('cmd', [
+ 'ceph-volume',
+ 'ceph-volume-systemd'
+ ])
+ def test_ceph_volume_command_exists(self, node, host, cmd):
+ assert host.exists(cmd)
+
+ def _get_osd_id_from_host(self, node, osd_tree):
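+        # Return the CRUSH ids of the OSDs sitting under this host's bucket
+        # in the `ceph osd tree -f json` output.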
+ children = []
+ for n in osd_tree['nodes']:
+ if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': # noqa E501
+ children = n['children']
+ return children
+
+ def _get_nb_up_osds_from_ids(self, node, osd_tree):
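+        # Count how many of this host's OSDs report status 'up' in the tree.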
+ nb_up = 0
+ ids = self._get_osd_id_from_host(node, osd_tree)
+ for n in osd_tree['nodes']:
+ if n['id'] in ids and n['status'] == 'up':
+ nb_up += 1
+ return nb_up
+
+ @pytest.mark.no_docker
+ def test_all_osds_are_up_and_in(self, node, host, setup):
+ cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501
+ cluster=setup["cluster_name"])
+ output = json.loads(host.check_output(cmd))
+ assert setup["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
+
+ @pytest.mark.docker
+ def test_all_docker_osds_are_up_and_in(self, node, host, setup):
+ container_binary = setup["container_binary"]
+ osd_id = host.check_output(container_binary + " ps -q --filter='name="
+ "ceph-osd' | head -1")
+ cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501
+ osd_id=osd_id,
+ cluster=setup["cluster_name"],
+ container_binary=container_binary
+ )
+ output = json.loads(host.check_output(cmd))
+ assert setup["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
--- /dev/null
+import pytest
+import json
+
+
+class TestRbdMirrors(object):
+
+ @pytest.mark.rbdmirror_secondary
+ @pytest.mark.no_docker
+ def test_rbd_mirror_is_installed(self, node, host):
+ assert host.package("rbd-mirror").is_installed
+
+ @pytest.mark.rbdmirror_secondary
+ def test_rbd_mirror_service_enabled_and_running(self, node, host):
+ service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
+ hostname=node["vars"]["inventory_hostname"]
+ )
+ s = host.service(service_name)
+ assert s.is_enabled
+ assert s.is_running
+
+ @pytest.mark.rbdmirror_secondary
+ def test_rbd_mirror_is_up(self, node, host, setup):
+ hostname = node["vars"]["inventory_hostname"]
+ cluster = setup["cluster_name"]
+ container_binary = setup["container_binary"]
+ daemons = []
+ if node['docker']:
+ container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format( # noqa E501
+ hostname=hostname, container_binary=container_binary)
+ else:
+ container_exec_cmd = ''
+ cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
+ container_exec_cmd=container_exec_cmd,
+ hostname=hostname,
+ cluster=cluster
+ )
+ output = host.check_output(cmd)
+ status = json.loads(output)
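+        # The servicemap lists a 'summary' entry next to the real rbd-mirror
+        # daemons; keep only daemon ids and collect the hostname each reports.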
+ daemon_ids = [i for i in status["servicemap"]["services"]
+ ["rbd-mirror"]["daemons"].keys() if i != "summary"]
+ for daemon_id in daemon_ids:
+ daemons.append(status["servicemap"]["services"]["rbd-mirror"]
+ ["daemons"][daemon_id]["metadata"]["hostname"])
+ assert hostname in daemons
--- /dev/null
+import pytest
+import json
+
+
+class TestRGWs(object):
+
+ @pytest.mark.no_docker
+ def test_rgw_is_installed(self, node, host):
+ result = host.package("radosgw").is_installed
+ if not result:
+ result = host.package("ceph-radosgw").is_installed
+ assert result
+
+ def test_rgw_service_enabled_and_running(self, node, host):
+ for i in range(int(node["radosgw_num_instances"])):
+ service_name = "ceph-radosgw@rgw.{hostname}.rgw{seq}".format(
+ hostname=node["vars"]["inventory_hostname"],
+ seq=i
+ )
+ s = host.service(service_name)
+ assert s.is_enabled
+ assert s.is_running
+
+ def test_rgw_is_up(self, node, host, setup):
+ hostname = node["vars"]["inventory_hostname"]
+ cluster = setup["cluster_name"]
+ container_binary = setup["container_binary"]
+ if node['docker']:
+ container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format( # noqa E501
+ hostname=hostname, container_binary=container_binary)
+ else:
+ container_exec_cmd = ''
+ cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
+ container_exec_cmd=container_exec_cmd,
+ hostname=hostname,
+ cluster=cluster
+ )
+ output = host.check_output(cmd)
+ keys = [i for i in json.loads(
+ output)["servicemap"]["services"]["rgw"]["daemons"].keys()]
+ keys.remove('summary')
+ daemons = json.loads(output)["servicemap"]["services"]["rgw"]["daemons"]
+ hostnames = []
+ for key in keys:
+            hostnames.append(daemons[key]['metadata']['hostname'])
+        assert hostname in hostnames
+
+ @pytest.mark.no_docker
+ def test_rgw_http_endpoint(self, node, host, setup):
+ # rgw frontends ip_addr is configured on public_interface
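+        # each instance binds to a consecutive port starting at 8080
+        # (rgw0 -> 8080, rgw1 -> 8081, ...)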
+ ip_addr = host.interface(setup['public_interface']).addresses[0]
+ for i in range(int(node["radosgw_num_instances"])):
+ assert host.socket(
+ "tcp://{ip_addr}:{port}".format(ip_addr=ip_addr,
+ port=(8080+i))
+ ).is_listening # noqa E501
--- /dev/null
+import pytest
+import json
+
+
+class TestRGWs(object):
+
+ @pytest.mark.no_docker
+ def test_rgw_bucket_default_quota_is_set(self, node, host, setup):
+ assert host.file(setup["conf_path"]).contains(
+ "rgw override bucket index max shards")
+ assert host.file(setup["conf_path"]).contains(
+ "rgw bucket default quota max objects")
+
+ @pytest.mark.no_docker
+ def test_rgw_bucket_default_quota_is_applied(self, node, host, setup):
+ radosgw_admin_cmd = "timeout --foreground -s KILL 5 sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user info --uid=test --format=json".format( # noqa E501
+ hostname=node["vars"]["inventory_hostname"],
+ cluster=setup['cluster_name']
+ )
+ radosgw_admin_output = host.run(radosgw_admin_cmd)
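+        # rc 22 (EINVAL) means the 'test' uid does not exist yet: create it
+        # and use the creation output, which also carries the quota settings.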
+ if radosgw_admin_output.rc == 22:
+ radosgw_admin_cmd = "timeout --foreground -s KILL 5 sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring user create --uid=test --display-name Test".format( # noqa E501
+ hostname=node["vars"]["inventory_hostname"],
+ cluster=setup['cluster_name']
+ )
+ radosgw_admin_output = host.run(radosgw_admin_cmd)
+ radosgw_admin_output_json = json.loads(radosgw_admin_output.stdout)
+        assert radosgw_admin_output_json["bucket_quota"]["enabled"] is True
+ assert radosgw_admin_output_json["bucket_quota"]["max_objects"] == 1638400 # noqa E501
+
+ @pytest.mark.no_docker
+ def test_rgw_tuning_pools_are_set(self, node, host, setup):
+ pools = node["vars"]["rgw_create_pools"]
+ if pools is None:
+ pytest.skip('rgw_create_pools not defined, nothing to test')
+ for pool_name in pools.keys():
+ cmd = host.run("sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname}.rgw0 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd pool get {pool_name} size".format( # noqa E501
+ hostname=node["vars"]["inventory_hostname"],
+ cluster=setup['cluster_name'],
+ pool_name=pool_name
+ ))
+ assert cmd.rc == 0
+
+ @pytest.mark.docker
+ def test_docker_rgw_tuning_pools_are_set(self, node, host, setup):
+ hostname = node["vars"]["inventory_hostname"]
+ cluster = setup['cluster_name']
+ container_binary = setup["container_binary"]
+ pools = node["vars"].get("rgw_create_pools")
+ if pools is None:
+ pytest.skip('rgw_create_pools not defined, nothing to test')
+ for pool_name in pools.keys():
+ cmd = host.run("sudo {container_binary} exec ceph-rgw-{hostname}-rgw0 ceph --cluster={cluster} -n client.rgw.{hostname}.rgw0 --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}.rgw0/keyring osd pool get {pool_name} size".format( # noqa E501
+ hostname=hostname,
+ cluster=cluster,
+ pool_name=pool_name,
+ container_binary=container_binary
+ ))
+ assert cmd.rc == 0
--- /dev/null
+import pytest
+import re
+
+
+class TestInstall(object):
+
+ def test_ceph_dir_exists_and_is_directory(self, host, node):
+ f = host.file('/etc/ceph')
+ assert f.exists
+ assert f.is_directory
+
+ def test_ceph_conf_exists_and_is_file(self, host, node, setup):
+ f = host.file(setup["conf_path"])
+ assert f.exists
+ assert f.is_file
+
+ @pytest.mark.no_docker
+ def test_ceph_command_exists(self, host, node):
+ assert host.exists("ceph")
+
+
+class TestCephConf(object):
+
+ def test_mon_host_line_has_correct_value(self, node, host, setup):
+ mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name'])) # noqa E501
+ result = True
+ for x in range(0, setup["num_mons"]):
+ pattern = re.compile(("v2:{subnet}.1{x}:3300,v1:{subnet}.1{x}:6789".format(subnet=setup["subnet"], x=x))) # noqa E501
+ if pattern.search(mon_host_line) is None:
+ result = False
+ assert result
+
+
+class TestCephCrash(object):
+ @pytest.mark.no_docker
+ @pytest.mark.ceph_crash
+ def test_ceph_crash_service_enabled_and_running(self, node, host):
+ s = host.service("ceph-crash")
+ assert s.is_enabled
+ assert s.is_running
+
+ @pytest.mark.docker
+ @pytest.mark.ceph_crash
+ def test_ceph_crash_service_enabled_and_running_container(self, node, host):
+ s = host.service("ceph-crash@{hostname}".format(hostname=node["vars"]["inventory_hostname"]))
+ assert s.is_enabled
+ assert s.is_running
--- /dev/null
+[mons]
+localhost
+
+[osds]
+localhost
+
+[rgws]
+localhost
+
+[mdss]
+localhost
--- /dev/null
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import json
+
+
+def set_module_args(args):
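+    # Serialize the arguments the same way Ansible does and store them in
+    # basic._ANSIBLE_ARGS so AnsibleModule() picks them up when the module
+    # under test runs in-process.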
+ if '_ansible_remote_tmp' not in args:
+ args['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in args:
+ args['_ansible_keep_remote_files'] = False
+
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
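+# Stand-ins for the real module exit paths: tests patch exit_json/fail_json
+# with the helpers below and read the result dict from the raised exception.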
+class AnsibleExitJson(Exception):
+ pass
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+def exit_json(*args, **kwargs):
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ raise AnsibleFailJson(kwargs)
--- /dev/null
+import sys
+import pytest
+
+sys.path.append('./library')
+import ceph_crush # noqa: E402
+
+
+class TestCephCrushModule(object):
+
+ def test_no_host(self):
+ location = [
+ ("chassis", "monchassis"),
+ ("rack", "monrack"),
+ ("row", "marow"),
+ ("pdu", "monpdu"),
+ ("pod", "monpod"),
+ ("room", "maroom"),
+ ("datacenter", "mondc"),
+ ("region", "maregion"),
+ ("root", "maroute"),
+ ]
+ with pytest.raises(Exception):
+ ceph_crush.sort_osd_crush_location(location, None)
+
+ def test_lower_than_two_bucket(self):
+ location = [
+ ("chassis", "monchassis"),
+ ]
+ with pytest.raises(Exception):
+ ceph_crush.sort_osd_crush_location(location, None)
+
+ def test_invalid_bucket_type(self):
+ location = [
+ ("host", "monhost"),
+ ("chassis", "monchassis"),
+ ("rackyyyyy", "monrack"),
+ ]
+ with pytest.raises(Exception):
+ ceph_crush.sort_osd_crush_location(location, None)
+
+ def test_ordering(self):
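+        # sort_osd_crush_location() must return the buckets ordered from the
+        # most specific type (host) up to root, whatever the input order.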
+ expected_result = [
+ ("host", "monhost"),
+ ("chassis", "monchassis"),
+ ("rack", "monrack"),
+ ("row", "marow"),
+ ("pdu", "monpdu"),
+ ("pod", "monpod"),
+ ("room", "maroom"),
+ ("datacenter", "mondc"),
+ ("region", "maregion"),
+ ("root", "maroute"),
+ ]
+ expected_result_reverse = expected_result[::-1]
+ result = ceph_crush.sort_osd_crush_location(expected_result_reverse, None)
+ assert expected_result == result
+
+ def test_generate_commands(self):
+ cluster = "test"
+ expected_command_list = [
+ ['ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monhost", "host"],
+ ['ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monchassis", "chassis"],
+ ['ceph', '--cluster', cluster, 'osd', 'crush', "move", "monhost", "chassis=monchassis"],
+ ['ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monrack", "rack"],
+ ['ceph', '--cluster', cluster, 'osd', 'crush', "move", "monchassis", "rack=monrack"],
+ ]
+
+ location = [
+ ("host", "monhost"),
+ ("chassis", "monchassis"),
+ ("rack", "monrack"),
+ ]
+ result = ceph_crush.create_and_move_buckets_list(cluster, location)
+ assert result == expected_command_list
+
+ def test_generate_commands_container(self):
+ cluster = "test"
+ containerized = "docker exec -ti ceph-mon"
+ expected_command_list = [
+ ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monhost", "host"],
+ ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monchassis", "chassis"],
+ ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "move", "monhost", "chassis=monchassis"],
+ ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "add-bucket", "monrack", "rack"],
+ ['docker', 'exec', '-ti', 'ceph-mon', 'ceph', '--cluster', cluster, 'osd', 'crush', "move", "monchassis", "rack=monrack"],
+ ]
+
+ location = [
+ ("host", "monhost"),
+ ("chassis", "monchassis"),
+ ("rack", "monrack"),
+ ]
+ result = ceph_crush.create_and_move_buckets_list(cluster, location, containerized)
+ assert result == expected_command_list
--- /dev/null
+from mock.mock import patch
+import os
+import pytest
+import ca_test_common
+import ceph_crush_rule
+
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_name = 'foo'
+fake_bucket_root = 'default'
+fake_bucket_type = 'host'
+fake_device_class = 'ssd'
+fake_profile = 'default'
+fake_user = 'client.admin'
+fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
+
+
+class TestCephCrushRuleModule(object):
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_without_parameters(self, m_fail_json):
+ ca_test_common.set_module_args({})
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'missing required arguments: name'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_with_name_only(self, m_fail_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'state is present but all of the following are missing: rule_type'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'replicated',
+ 'bucket_root': fake_bucket_root,
+ 'bucket_type': fake_bucket_type,
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_create_non_existing_replicated_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'replicated',
+ 'bucket_root': fake_bucket_root,
+ 'bucket_type': fake_bucket_type
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
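+        # first run_command call: `rule dump` fails because the rule does not
+        # exist yet; second call: the create succeeds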
+ get_rc = 2
+ get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
+ get_stdout = ''
+ create_rc = 0
+ create_stderr = ''
+ create_stdout = ''
+ m_run_command.side_effect = [
+ (get_rc, get_stdout, get_stderr),
+ (create_rc, create_stdout, create_stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'create-replicated', fake_name, fake_bucket_root, fake_bucket_type]
+ assert result['rc'] == create_rc
+ assert result['stderr'] == create_stderr
+ assert result['stdout'] == create_stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_create_existing_replicated_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'replicated',
+ 'bucket_root': fake_bucket_root,
+ 'bucket_type': fake_bucket_type
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ rc = 0
+ stderr = ''
+ stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'dump', fake_name, '--format=json']
+ assert result['rc'] == 0
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_create_non_existing_replicated_rule_device_class(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'replicated',
+ 'bucket_root': fake_bucket_root,
+ 'bucket_type': fake_bucket_type,
+ 'device_class': fake_device_class
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ get_rc = 2
+ get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
+ get_stdout = ''
+ create_rc = 0
+ create_stderr = ''
+ create_stdout = ''
+ m_run_command.side_effect = [
+ (get_rc, get_stdout, get_stderr),
+ (create_rc, create_stdout, create_stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'create-replicated', fake_name, fake_bucket_root, fake_bucket_type, fake_device_class]
+ assert result['rc'] == create_rc
+ assert result['stderr'] == create_stderr
+ assert result['stdout'] == create_stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_create_existing_replicated_rule_device_class(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'replicated',
+ 'bucket_root': fake_bucket_root,
+ 'bucket_type': fake_bucket_type,
+ 'device_class': fake_device_class
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ rc = 0
+ stderr = ''
+ stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'dump', fake_name, '--format=json']
+ assert result['rc'] == 0
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_create_non_existing_erasure_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'erasure',
+ 'profile': fake_profile
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ get_rc = 2
+ get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
+ get_stdout = ''
+ create_rc = 0
+ create_stderr = ''
+ create_stdout = 'created rule {} at 1'.format(fake_name)
+ m_run_command.side_effect = [
+ (get_rc, get_stdout, get_stderr),
+ (create_rc, create_stdout, create_stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'create-erasure', fake_name, fake_profile]
+ assert result['rc'] == create_rc
+ assert result['stderr'] == create_stderr
+ assert result['stdout'] == create_stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_create_existing_erasure_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'erasure',
+ 'profile': fake_profile
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ rc = 0
+ stderr = ''
+ stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'dump', fake_name, '--format=json']
+ assert result['rc'] == 0
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_update_existing_replicated_rule(self, m_run_command, m_fail_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'replicated',
+ 'bucket_root': fake_bucket_root,
+ 'bucket_type': fake_bucket_type,
+ 'device_class': fake_device_class
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+ rc = 0
+ stderr = ''
+ stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ print(result)
+ assert not result['changed']
+ assert result['msg'] == 'Can not convert crush rule {} to replicated'.format(fake_name)
+ assert result['rc'] == 1
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_update_existing_erasure_rule(self, m_run_command, m_fail_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'rule_type': 'erasure',
+ 'profile': fake_profile
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+ rc = 0
+ stderr = ''
+ stdout = '{{"type":1,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ print(result)
+ assert not result['changed']
+ assert result['msg'] == 'Can not convert crush rule {} to erasure'.format(fake_name)
+ assert result['rc'] == 1
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_remove_non_existing_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'state': 'absent'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ rc = 2
+ stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
+ stdout = ''
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'dump', fake_name, '--format=json']
+ assert result['rc'] == 0
+ assert result['stderr'] == stderr
+ assert result['stdout'] == "Crush Rule {} doesn't exist".format(fake_name)
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_remove_existing_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'state': 'absent'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ get_rc = 0
+ get_stderr = ''
+ get_stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
+ remove_rc = 0
+ remove_stderr = ''
+ remove_stdout = ''
+ m_run_command.side_effect = [
+ (get_rc, get_stdout, get_stderr),
+ (remove_rc, remove_stdout, remove_stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'rm', fake_name]
+ assert result['rc'] == remove_rc
+ assert result['stderr'] == remove_stderr
+ assert result['stdout'] == remove_stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_get_non_existing_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'state': 'info'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ rc = 2
+ stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
+ stdout = ''
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'dump', fake_name, '--format=json']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_get_existing_rule(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'state': 'info'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ rc = 0
+ stderr = ''
+ stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush', 'rule',
+ 'dump', fake_name, '--format=json']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
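+    # With CEPH_CONTAINER_BINARY/IMAGE set, the ceph command is wrapped in a
+    # container run invocation mounting the usual ceph directories.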
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_container(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'state': 'info'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ rc = 0
+ stderr = ''
+ stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_crush_rule.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph', fake_container_image,
+ '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'crush',
+ 'rule', 'dump', fake_name, '--format=json']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
--- /dev/null
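+# Unit tests for the ceph_dashboard_user module: verify the dashboard
+# ac-user command lines and the weak-password failure path.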
+from mock.mock import MagicMock, patch
+import pytest
+import os
+import ca_test_common
+import ceph_dashboard_user
+
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+
+
+class TestCephDashboardUserModule(object):
+ def setup_method(self):
+ self.fake_binary = 'ceph'
+ self.fake_cluster = 'ceph'
+ self.fake_name = 'foo'
+ self.fake_user = 'foo'
+ self.fake_password = 'bar'
+ self.fake_roles = ['read-only', 'block-manager']
+ self.fake_params = {'cluster': self.fake_cluster,
+ 'name': self.fake_user,
+ 'password': self.fake_password,
+ 'roles': self.fake_roles}
+ self.fake_module = MagicMock()
+ self.fake_module.params = self.fake_params
+
+ def test_create_user(self):
+ self.fake_module.params = self.fake_params
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'dashboard', 'ac-user-create',
+ '-i', '-',
+ self.fake_user
+ ]
+
+ assert ceph_dashboard_user.create_user(self.fake_module) == expected_cmd
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_create_user_container(self):
+ fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--interactive',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + self.fake_binary,
+ fake_container_image
+ ]
+ self.fake_module.params = self.fake_params
+ expected_cmd = fake_container_cmd + [
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'dashboard', 'ac-user-create',
+ '-i', '-',
+ self.fake_user
+ ]
+
+ assert ceph_dashboard_user.create_user(self.fake_module, container_image=fake_container_image) == expected_cmd
+
+ def test_set_roles(self):
+ self.fake_module.params = self.fake_params
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'dashboard', 'ac-user-set-roles',
+ self.fake_user
+ ]
+ expected_cmd.extend(self.fake_roles)
+
+ assert ceph_dashboard_user.set_roles(self.fake_module) == expected_cmd
+
+ def test_set_password(self):
+ self.fake_module.params = self.fake_params
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'dashboard', 'ac-user-set-password',
+ '-i', '-',
+ self.fake_user
+ ]
+
+ assert ceph_dashboard_user.set_password(self.fake_module) == expected_cmd
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_set_password_container(self):
+ fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--interactive',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + self.fake_binary,
+ fake_container_image
+ ]
+ self.fake_module.params = self.fake_params
+ expected_cmd = fake_container_cmd + [
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'dashboard', 'ac-user-set-password',
+ '-i', '-',
+ self.fake_user
+ ]
+
+ assert ceph_dashboard_user.set_password(self.fake_module, container_image=fake_container_image) == expected_cmd
+
+ def test_get_user(self):
+ self.fake_module.params = self.fake_params
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'dashboard', 'ac-user-show',
+ self.fake_user,
+ '--format=json'
+ ]
+
+ assert ceph_dashboard_user.get_user(self.fake_module) == expected_cmd
+
+ def test_remove_user(self):
+ self.fake_module.params = self.fake_params
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'dashboard', 'ac-user-delete',
+ self.fake_user
+ ]
+
+ assert ceph_dashboard_user.remove_user(self.fake_module) == expected_cmd
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_create_user_fail_with_weak_password(self, m_run_command, m_fail_json):
+ ca_test_common.set_module_args(self.fake_module.params)
+ m_fail_json.side_effect = ca_test_common.fail_json
+ get_rc = 2
+ get_stderr = 'Error ENOENT: User {} does not exist.'.format(self.fake_user)
+ get_stdout = ''
+ create_rc = 22
+ create_stderr = 'Error EINVAL: Password is too weak.'
+ create_stdout = ''
+ m_run_command.side_effect = [
+ (get_rc, get_stdout, get_stderr),
+ (create_rc, create_stdout, create_stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_dashboard_user.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == create_stderr
+ assert result['rc'] == 1
--- /dev/null
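+# Unit tests for the ceph_ec_profile module: erasure-code profile
+# get/set/rm command lines and run_module state handling.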
+from mock.mock import MagicMock, patch
+import ca_test_common
+import ceph_ec_profile
+import pytest
+
+
+class TestCephEcProfile(object):
+ def setup_method(self):
+ self.fake_params = []
+ self.fake_binary = 'ceph'
+ self.fake_cluster = 'ceph'
+ self.fake_name = 'foo'
+ self.fake_k = 2
+ self.fake_m = 4
+ self.fake_module = MagicMock()
+ self.fake_module.params = self.fake_params
+
+ def test_get_profile(self):
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'osd', 'erasure-code-profile',
+ 'get', self.fake_name,
+ '--format=json'
+ ]
+
+ assert ceph_ec_profile.get_profile(self.fake_module, self.fake_name) == expected_cmd
+
+ @pytest.mark.parametrize("stripe_unit,force", [(False, False),
+ (32, True),
+ (False, True),
+ (32, False)])
+ def test_create_profile(self, stripe_unit, force):
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'osd', 'erasure-code-profile',
+ 'set', self.fake_name,
+ 'k={}'.format(self.fake_k), 'm={}'.format(self.fake_m),
+ ]
+ if stripe_unit:
+ expected_cmd.append('stripe_unit={}'.format(stripe_unit))
+ if force:
+ expected_cmd.append('--force')
+
+ assert ceph_ec_profile.create_profile(self.fake_module,
+ self.fake_name,
+ self.fake_k,
+ self.fake_m,
+ stripe_unit,
+ self.fake_cluster,
+ force) == expected_cmd
+
+ def test_delete_profile(self):
+ expected_cmd = [
+ self.fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', self.fake_cluster,
+ 'osd', 'erasure-code-profile',
+ 'rm', self.fake_name
+ ]
+
+ assert ceph_ec_profile.delete_profile(self.fake_module,
+ self.fake_name,
+ self.fake_cluster) == expected_cmd
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ceph_ec_profile.exec_command')
+ def test_state_present_nothing_to_update(self, m_exec_command, m_exit_json, m_fail_json):
+ ca_test_common.set_module_args({"state": "present",
+ "name": "foo",
+ "k": 2,
+ "m": 4,
+ "stripe_unit": 32,
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ m_fail_json.side_effect = ca_test_common.fail_json
+ m_exec_command.return_value = (0,
+ ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
+ '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501
+ '')
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as r:
+ ceph_ec_profile.run_module()
+
+ result = r.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json']
+ assert result['stdout'] == '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}' # noqa: E501
+ assert not result['stderr']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ceph_ec_profile.exec_command')
+ def test_state_present_profile_to_update(self, m_exec_command, m_exit_json, m_fail_json):
+ ca_test_common.set_module_args({"state": "present",
+ "name": "foo",
+ "k": 2,
+ "m": 6,
+ "stripe_unit": 32
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ m_fail_json.side_effect = ca_test_common.fail_json
+ m_exec_command.side_effect = [
+ (0,
+ ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
+ '{"crush-device-class":"","crush-failure-domain":"host","crush-root":"default","jerasure-per-chunk-alignment":"false","k":"2","m":"4","plugin":"jerasure","stripe_unit":"32","technique":"reed_sol_van","w":"8"}', # noqa: E501
+ ''),
+ (0,
+ ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force'],
+ '',
+ ''
+ )
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as r:
+ ceph_ec_profile.run_module()
+
+ result = r.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=6', 'stripe_unit=32', '--force']
+ assert not result['stdout']
+ assert not result['stderr']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ceph_ec_profile.exec_command')
+ def test_state_present_profile_doesnt_exist(self, m_exec_command, m_exit_json, m_fail_json):
+ ca_test_common.set_module_args({"state": "present",
+ "name": "foo",
+ "k": 2,
+ "m": 4,
+ "stripe_unit": 32
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ m_fail_json.side_effect = ca_test_common.fail_json
+ m_exec_command.side_effect = [
+ (2,
+ ['ceph', 'osd', 'erasure-code-profile', 'get', 'foo', '--format', 'json'],
+ '',
+ "Error ENOENT: unknown erasure code profile 'foo'"),
+ (0,
+ ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force'],
+ '',
+ ''
+ )
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as r:
+ ceph_ec_profile.run_module()
+
+ result = r.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'set', 'foo', 'k=2', 'm=4', 'stripe_unit=32', '--force']
+ assert not result['stdout']
+ assert not result['stderr']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ceph_ec_profile.exec_command')
+ def test_state_absent_on_existing_profile(self, m_exec_command, m_exit_json, m_fail_json):
+ ca_test_common.set_module_args({"state": "absent",
+ "name": "foo"
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ m_fail_json.side_effect = ca_test_common.fail_json
+ m_exec_command.return_value = (0,
+ ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'],
+ '',
+ '')
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as r:
+ ceph_ec_profile.run_module()
+
+ result = r.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo']
+ assert result['stdout'] == 'Profile foo removed.'
+ assert not result['stderr']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ceph_ec_profile.exec_command')
+ def test_state_absent_on_nonexisting_profile(self, m_exec_command, m_exit_json, m_fail_json):
+ ca_test_common.set_module_args({"state": "absent",
+ "name": "foo"
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ m_fail_json.side_effect = ca_test_common.fail_json
+ m_exec_command.return_value = (0,
+ ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo'],
+ '',
+ 'erasure-code-profile foo does not exist')
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as r:
+ ceph_ec_profile.run_module()
+
+ result = r.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', 'osd', 'erasure-code-profile', 'rm', 'foo']
+ assert result['stdout'] == "Skipping, the profile foo doesn't exist"
+ assert result['stderr'] == 'erasure-code-profile foo does not exist'
+ assert result['rc'] == 0
+
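+    # In check mode the module exits early: no change, rc 0, no output.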
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': 'foo',
+ 'k': 2,
+ 'm': 4,
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_ec_profile.run_module()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
--- /dev/null
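+# Unit tests for the ceph_fs module (fs new/set/get/rm/fail command lines).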
+from mock.mock import MagicMock
+import ceph_fs
+
+
+fake_binary = 'ceph'
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + fake_binary,
+ fake_container_image
+]
+fake_fs = 'foo'
+fake_data_pool = 'bar_data'
+fake_metadata_pool = 'bar_metadata'
+fake_max_mds = 2
+fake_params = {'cluster': fake_cluster,
+ 'name': fake_fs,
+ 'data': fake_data_pool,
+ 'metadata': fake_metadata_pool,
+ 'max_mds': fake_max_mds}
+
+
+class TestCephFsModule(object):
+
+ def test_create_fs(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', fake_cluster,
+ 'fs', 'new',
+ fake_fs,
+ fake_metadata_pool,
+ fake_data_pool
+ ]
+
+ assert ceph_fs.create_fs(fake_module) == expected_cmd
+
+ def test_set_fs(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', fake_cluster,
+ 'fs', 'set',
+ fake_fs,
+ 'max_mds',
+ str(fake_max_mds)
+ ]
+
+ assert ceph_fs.set_fs(fake_module) == expected_cmd
+
+ def test_get_fs(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', fake_cluster,
+ 'fs', 'get',
+ fake_fs,
+ '--format=json'
+ ]
+
+ assert ceph_fs.get_fs(fake_module) == expected_cmd
+
+ def test_remove_fs(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', fake_cluster,
+ 'fs', 'rm',
+ fake_fs,
+ '--yes-i-really-mean-it'
+ ]
+
+ assert ceph_fs.remove_fs(fake_module) == expected_cmd
+
+ def test_fail_fs(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster', fake_cluster,
+ 'fs', 'fail',
+ fake_fs
+ ]
+
+ assert ceph_fs.fail_fs(fake_module) == expected_cmd
--- /dev/null
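+# Unit tests for the ceph_key module command builders and run_module states.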
+import json
+import os
+import mock
+import pytest
+import ca_test_common
+import ceph_key
+
+
+@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'})
+class TestCephKeyModule(object):
+
+ def test_generate_secret(self):
+ expected_length = 40
+ result = len(ceph_key.generate_secret())
+ assert result == expected_length
+
+ def test_generate_caps_ceph_authtool(self):
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_type = "ceph-authtool"
+ expected_command_list = [
+ '--cap',
+ 'mon',
+ 'allow *',
+ '--cap',
+ 'osd',
+ 'allow rwx'
+ ]
+ result = ceph_key.generate_caps(fake_type, fake_caps)
+ assert result == expected_command_list
+
+ def test_generate_caps_not_ceph_authtool(self):
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_type = ""
+ expected_command_list = [
+ 'mon',
+ 'allow *',
+ 'osd',
+ 'allow rwx'
+ ]
+ result = ceph_key.generate_caps(fake_type, fake_caps)
+ assert result == expected_command_list
+
+ def test_generate_ceph_cmd_list_non_container(self):
+ fake_cluster = "fake"
+ fake_args = ['arg']
+ fake_user = "fake-user"
+ fake_key = "/tmp/my-key"
+ expected_command_list = [
+ 'ceph',
+ '-n',
+ "fake-user",
+ '-k',
+ "/tmp/my-key",
+ '--cluster',
+ fake_cluster,
+ 'auth',
+ 'arg'
+ ]
+ result = ceph_key.generate_ceph_cmd(
+ fake_cluster, fake_args, fake_user, fake_key)
+ assert result == expected_command_list
+
+ def test_generate_ceph_cmd_list_container(self):
+ fake_cluster = "fake"
+ fake_args = ['arg']
+ fake_user = "fake-user"
+ fake_key = "/tmp/my-key"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = ['docker',
+ 'run',
+ '--rm',
+ '--net=host', # noqa E501
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '-n',
+ "fake-user",
+ '-k',
+ "/tmp/my-key",
+ '--cluster',
+ fake_cluster,
+ 'auth',
+ 'arg']
+ result = ceph_key.generate_ceph_cmd(
+ fake_cluster, fake_args, fake_user, fake_key, fake_container_image)
+ assert result == expected_command_list
+
+ def test_generate_ceph_authtool_cmd_non_container_no_auid(self):
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_secret = "super-secret"
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_dest = "/fake/ceph"
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+ expected_command_list = [
+ 'ceph-authtool',
+ '--create-keyring',
+ fake_file_destination,
+ '--name',
+ fake_name,
+ '--add-key',
+ fake_secret,
+ '--cap',
+ 'mon',
+ 'allow *',
+ '--cap',
+ 'osd',
+ 'allow rwx',
+ ]
+ result = ceph_key.generate_ceph_authtool_cmd(
+ fake_cluster, fake_name, fake_secret, fake_caps, fake_file_destination) # noqa E501
+ assert result == expected_command_list
+
+ def test_generate_ceph_authtool_cmd_container(self):
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_secret = "super-secret"
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_dest = "/fake/ceph"
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = ['docker',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph-authtool',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '--create-keyring',
+ fake_file_destination,
+ '--name',
+ fake_name,
+ '--add-key',
+ fake_secret,
+ '--cap',
+ 'mon',
+ 'allow *',
+ '--cap',
+ 'osd',
+ 'allow rwx']
+ result = ceph_key.generate_ceph_authtool_cmd(
+ fake_cluster, fake_name, fake_secret, fake_caps, fake_file_destination, fake_container_image) # noqa E501
+ assert result == expected_command_list
+
+ def test_create_key_non_container(self):
+ fake_module = "fake"
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_result = " fake"
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_secret = "super-secret"
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_import_key = True
+ fake_dest = "/fake/ceph"
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+ expected_command_list = [
+ ['ceph-authtool', '--create-keyring', fake_file_destination, '--name', fake_name,
+ '--add-key', fake_secret, '--cap', 'mon', 'allow *', '--cap', 'osd', 'allow rwx'],
+ ['ceph', '-n', fake_user, '-k', fake_user_key, '--cluster', fake_cluster, 'auth',
+ 'import', '-i', fake_file_destination],
+ ]
+ result = ceph_key.create_key(fake_module, fake_result, fake_cluster, fake_user, fake_user_key,
+ fake_name, fake_secret, fake_caps, fake_import_key, fake_file_destination)
+ assert result == expected_command_list
+
+ def test_create_key_container(self):
+ fake_module = "fake"
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_result = "fake"
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_secret = "super-secret"
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_dest = "/fake/ceph"
+ fake_import_key = True
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = [
+ ['docker',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph-authtool',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '--create-keyring', fake_file_destination,
+ '--name', fake_name,
+ '--add-key', fake_secret,
+ '--cap', 'mon', 'allow *',
+ '--cap', 'osd', 'allow rwx'],
+ ['docker',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/fake.client.admin.keyring',
+ '--cluster', fake_cluster,
+ 'auth', 'import',
+ '-i', fake_file_destination]]
+ result = ceph_key.create_key(fake_module, fake_result, fake_cluster, fake_user, fake_user_key, fake_name,
+ fake_secret, fake_caps, fake_import_key, fake_file_destination, fake_container_image)
+ assert result == expected_command_list
+
+ def test_create_key_non_container_no_import(self):
+ fake_module = "fake"
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_result = "fake"
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_secret = "super-secret"
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_dest = "/fake/ceph"
+ fake_import_key = False
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+        # create_key returns a list of command lists; with import_key False
+        # only the ceph-authtool command is built.
+ expected_command_list = [[
+ 'ceph-authtool',
+ '--create-keyring',
+ fake_file_destination,
+ '--name',
+ fake_name,
+ '--add-key',
+ fake_secret,
+ '--cap',
+ 'mon',
+ 'allow *',
+ '--cap',
+ 'osd',
+ 'allow rwx', ]
+ ]
+ result = ceph_key.create_key(fake_module, fake_result, fake_cluster, fake_user, fake_user_key,
+ fake_name, fake_secret, fake_caps, fake_import_key, fake_file_destination) # noqa E501
+ assert result == expected_command_list
+
+ def test_create_key_container_no_import(self):
+ fake_module = "fake"
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_result = "fake"
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_secret = "super-secret"
+ fake_caps = {
+ 'mon': 'allow *',
+ 'osd': 'allow rwx',
+ }
+ fake_dest = "/fake/ceph"
+ fake_import_key = False
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+        # create_key returns a list of command lists; with import_key False
+        # only the containerized ceph-authtool command is built.
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = [['docker', # noqa E128
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph-authtool',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '--create-keyring',
+ fake_file_destination,
+ '--name',
+ fake_name,
+ '--add-key',
+ fake_secret,
+ '--cap',
+ 'mon',
+ 'allow *',
+ '--cap',
+ 'osd',
+ 'allow rwx']]
+ result = ceph_key.create_key(fake_module, fake_result, fake_cluster, fake_user, fake_user_key, fake_name,
+ fake_secret, fake_caps, fake_import_key, fake_file_destination, fake_container_image)
+ assert result == expected_command_list
+
+ def test_delete_key_non_container(self):
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ expected_command_list = [
+ ['ceph', '-n', 'client.admin', '-k', '/etc/ceph/fake.client.admin.keyring',
+ '--cluster', fake_cluster, 'auth', 'del', fake_name],
+ ]
+ result = ceph_key.delete_key(fake_cluster, fake_user, fake_user_key, fake_name)
+ assert result == expected_command_list
+
+ def test_delete_key_container(self):
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = [['docker',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/fake.client.admin.keyring',
+ '--cluster', fake_cluster,
+ 'auth', 'del', fake_name]]
+ result = ceph_key.delete_key(
+ fake_cluster, fake_user, fake_user_key, fake_name, fake_container_image)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml'])
+ def test_info_key_non_container(self, output_format):
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_user = "fake-user"
+ expected_command_list = [
+ ['ceph', '-n', fake_user, '-k', fake_user_key, '--cluster', fake_cluster, 'auth',
+ 'get', fake_name, '-f', output_format],
+ ]
+ result = ceph_key.info_key(
+ fake_cluster, fake_name, fake_user, fake_user_key, output_format)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml'])
+    def test_info_key_container(self, output_format):
+ fake_cluster = "fake"
+ fake_name = "client.fake"
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = [['docker', # noqa E128
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '-n', fake_user,
+ '-k', fake_user_key,
+ '--cluster', fake_cluster,
+ 'auth', 'get', fake_name,
+ '-f', output_format]]
+ result = ceph_key.info_key(
+ fake_cluster, fake_name, fake_user, fake_user_key, output_format, fake_container_image) # noqa E501
+ assert result == expected_command_list
+
+ def test_list_key_non_container(self):
+ fake_cluster = "fake"
+ fake_user = "fake-user"
+ fake_key = "/tmp/my-key"
+ expected_command_list = [
+ ['ceph', '-n', "fake-user", '-k', "/tmp/my-key",
+ '--cluster', fake_cluster, 'auth', 'ls', '-f', 'json'],
+ ]
+ result = ceph_key.list_keys(fake_cluster, fake_user, fake_key)
+ assert result == expected_command_list
+
+ def test_get_key_container(self):
+ fake_cluster = "fake"
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_name = "client.fake"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ fake_dest = "/fake/ceph"
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+ expected_command_list = [['docker', # noqa E128
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '-n', fake_user,
+ '-k', fake_user_key,
+ '--cluster', fake_cluster,
+ 'auth', 'get',
+ fake_name, '-o', fake_file_destination]]
+ result = ceph_key.get_key(
+ fake_cluster, fake_user, fake_user_key, fake_name, fake_file_destination, fake_container_image)
+ assert result == expected_command_list
+
+ def test_get_key_non_container(self):
+ fake_cluster = "fake"
+ fake_user = 'client.admin'
+ fake_user_key = '/etc/ceph/fake.client.admin.keyring'
+ fake_dest = "/fake/ceph"
+ fake_name = "client.fake"
+ fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
+ fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
+ expected_command_list = [
+ ['ceph', '-n', fake_user, '-k', fake_user_key,
+ '--cluster', fake_cluster, 'auth', 'get', fake_name, '-o', fake_file_destination],
+ ]
+ result = ceph_key.get_key(
+ fake_cluster, fake_user, fake_user_key, fake_name, fake_file_destination)
+ assert result == expected_command_list
+
+ def test_list_key_non_container_with_mon_key(self):
+ fake_hostname = "mon01"
+ fake_cluster = "fake"
+ fake_user = "mon."
+ fake_keyring_dirname = fake_cluster + "-" + fake_hostname
+ fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring')
+ expected_command_list = [
+ ['ceph', '-n', "mon.", '-k', "/var/lib/ceph/mon/fake-mon01/keyring",
+ '--cluster', fake_cluster, 'auth', 'ls', '-f', 'json'],
+ ]
+ result = ceph_key.list_keys(fake_cluster, fake_user, fake_key)
+ assert result == expected_command_list
+
+ def test_list_key_container_with_mon_key(self):
+ fake_hostname = "mon01"
+ fake_cluster = "fake"
+ fake_user = "mon."
+ fake_keyring_dirname = fake_cluster + "-" + fake_hostname
+ fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring')
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = [['docker',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '-n', "mon.",
+ '-k', "/var/lib/ceph/mon/fake-mon01/keyring",
+ '--cluster', fake_cluster,
+ 'auth', 'ls',
+ '-f', 'json'], ]
+ result = ceph_key.list_keys(fake_cluster, fake_user, fake_key, fake_container_image)
+ assert result == expected_command_list
+
+ def test_list_key_container(self):
+ fake_cluster = "fake"
+ fake_user = "fake-user"
+ fake_key = "/tmp/my-key"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
+ expected_command_list = [['docker',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
+ '-n', "fake-user",
+ '-k', "/tmp/my-key",
+ '--cluster', fake_cluster,
+ 'auth', 'ls',
+ '-f', 'json'], ]
+ result = ceph_key.list_keys(
+ fake_cluster, fake_user, fake_key, fake_container_image)
+ assert result == expected_command_list
+
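+    # Only client.admin and the client.bootstrap-* entities should be
+    # extracted from the 'auth dump' payload.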
+ def test_lookup_ceph_initial_entities(self):
+ fake_module = "fake"
+ fake_ceph_dict = { "auth_dump":[ { "entity":"osd.0", "key":"AQAJkMhbszeBBBAA4/V1tDFXGlft1GnHJS5wWg==", "caps":{ "mgr":"allow profile osd", "mon":"allow profile osd", "osd":"allow *" } }, { "entity":"osd.1", "key":"AQAjkMhbshueAhAAjZec50aBgd1NObLz57SQvg==", "caps":{ "mgr":"allow profile osd", "mon":"allow profile osd", "osd":"allow *" } }, { "entity":"client.admin", "key":"AQDZjshbrJv6EhAAY9v6LzLYNDpPdlC3HD5KHA==", "auid":0, "caps":{ "mds":"allow", "mgr":"allow *", "mon":"allow *", "osd":"allow *" } }, { "entity":"client.bootstrap-mds", "key":"AQDojshbc4QCHhAA1ZTrkt9dbSZRVU2GzI6U4A==", "caps":{ "mon":"allow profile bootstrap-mds" } }, { "entity":"client.bootstrap-mgr", "key":"AQBfiu5bAAAAABAARcNG24hUMlk4AdstVA5MVQ==", "caps":{ "mon":"allow profile bootstrap-mgr" } }, { "entity":"client.bootstrap-osd", "key":"AQDjjshbYW+uGxAAyHcPCXXmVoL8VsTBI8z1Ng==", "caps":{ "mon":"allow profile bootstrap-osd" } }, { "entity":"client.bootstrap-rbd", "key":"AQDyjshb522eIhAAtAz6nUPMOdG4H9u0NgpXhA==", "caps":{ "mon":"allow profile bootstrap-rbd" } }, { "entity":"client.bootstrap-rbd-mirror", "key":"AQDfh+5bAAAAABAAEGBD59Lj2vAKIdN8pq4lbQ==", "caps":{ "mon":"allow profile bootstrap-rbd-mirror" } }, { "entity":"client.bootstrap-rgw", "key":"AQDtjshbDl8oIBAAq1SfSYQKDR49hJNWJVwDQw==", "caps":{ "mon":"allow profile bootstrap-rgw" } }, { "entity":"mgr.mon0", "key":"AQA0j8hbgGapORAAoDkyAvXVkM5ej4wNn4cwTQ==", "caps":{ "mds":"allow *", "mon":"allow profile mgr", "osd":"allow *" } } ] } # noqa E501
+ fake_ceph_dict_str = json.dumps(fake_ceph_dict) # convert to string
+ expected_entity_list = ['client.admin', 'client.bootstrap-mds', 'client.bootstrap-mgr', # noqa E501
+ 'client.bootstrap-osd', 'client.bootstrap-rbd', 'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw'] # noqa E501
+ result = ceph_key.lookup_ceph_initial_entities(fake_module, fake_ceph_dict_str)
+ assert result == expected_entity_list
+
+ def test_build_key_path_admin(self):
+ fake_cluster = "fake"
+ entity = "client.admin"
+ expected_result = "/etc/ceph/fake.client.admin.keyring"
+ result = ceph_key.build_key_path(fake_cluster, entity)
+ assert result == expected_result
+
+ def test_build_key_path_bootstrap_osd(self):
+ fake_cluster = "fake"
+ entity = "client.bootstrap-osd"
+ expected_result = "/var/lib/ceph/bootstrap-osd/fake.keyring"
+ result = ceph_key.build_key_path(fake_cluster, entity)
+ assert result == expected_result
+
+ @mock.patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @mock.patch('ceph_key.exec_commands')
+ @pytest.mark.parametrize('output_format', ['json', 'plain', 'xml', 'yaml'])
+ def test_state_info(self, m_exec_commands, m_exit_json, output_format):
+ ca_test_common.set_module_args({"state": "info",
+ "cluster": "ceph",
+ "name": "client.admin",
+ "output_format": output_format})
+ m_exit_json.side_effect = ca_test_common.exit_json
+ m_exec_commands.return_value = (0,
+ ['ceph', 'auth', 'get', 'client.admin', '-f', output_format],
+ '[{"entity":"client.admin","key":"AQC1tw5fF156GhAAoJCvHGX/jl/k7/N4VZm8iQ==","caps":{"mds":"allow *","mgr":"allow *","mon":"allow *","osd":"allow *"}}]', # noqa: E501
+ 'exported keyring for client.admin')
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_key.run_module()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', 'auth', 'get', 'client.admin', '-f', output_format]
+ assert result['stdout'] == '[{"entity":"client.admin","key":"AQC1tw5fF156GhAAoJCvHGX/jl/k7/N4VZm8iQ==","caps":{"mds":"allow *","mgr":"allow *","mon":"allow *","osd":"allow *"}}]' # noqa: E501
+ assert result['stderr'] == 'exported keyring for client.admin'
+ assert result['rc'] == 0
+
+ @mock.patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_state_info_invalid_format(self, m_fail_json):
+ invalid_format = 'txt'
+ ca_test_common.set_module_args({"state": "info",
+ "cluster": "ceph",
+ "name": "client.admin",
+ "output_format": invalid_format})
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_key.run_module()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'value of output_format must be one of: json, plain, xml, yaml, got: {}'.format(invalid_format)
+
+ @mock.patch('ceph_key.generate_secret')
+ @mock.patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_generate_key(self, m_exit_json, m_generate_secret):
+ fake_secret = b'AQDaLb1fAAAAABAAsIMKdGEKu+lGOyXnRfT0Hg=='
+ ca_test_common.set_module_args({"state": "generate_secret"})
+ m_exit_json.side_effect = ca_test_common.exit_json
+ m_generate_secret.return_value = fake_secret
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_key.run_module()
+ assert result.value.args[0]['stdout'] == fake_secret.decode()
--- /dev/null
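+# Unit tests for the ceph_mgr_module module (mgr module enable/disable).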
+from mock.mock import patch
+import os
+import pytest
+import ca_test_common
+import ceph_mgr_module
+
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_module = 'noup'
+fake_user = 'client.admin'
+fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
+
+
+class TestCephMgrModuleModule(object):
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_without_parameters(self, m_fail_json):
+ ca_test_common.set_module_args({})
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_mgr_module.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'missing required arguments: name'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_module,
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_mgr_module.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module]
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_failure(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_module
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'Error ENOENT: all mgr daemons do not support module \'{}\', pass --force to force enablement'.format(fake_module)
+ rc = 2
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_mgr_module.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_enable_module(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_module,
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_mgr_module.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_already_enable_module(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_module,
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stderr = 'module \'{}\' is already enabled'.format(fake_module)
+ stdout = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_mgr_module.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_disable_module(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_module,
+ 'state': 'disable'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_mgr_module.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'mgr', 'module', 'disable', fake_module]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_container(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_module,
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = '{} is set'.format(fake_module)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_mgr_module.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph', fake_container_image,
+ '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'mgr', 'module', 'enable', fake_module]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
--- /dev/null
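+# Unit tests for the ceph_osd module (osd destroy/down/in/out/purge/rm).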
+from mock.mock import patch
+import os
+import pytest
+import ca_test_common
+import ceph_osd
+
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_id = '42'
+fake_ids = ['0', '7', '13']
+fake_user = 'client.admin'
+fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
+invalid_state = 'foo'
+
+
+class TestCephOSDModule(object):
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_without_parameters(self, m_fail_json):
+ ca_test_common.set_module_args({})
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'missing required arguments: ids, state'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_with_invalid_state(self, m_fail_json):
+ ca_test_common.set_module_args({
+ 'ids': fake_id,
+ 'state': invalid_state,
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == ('value of state must be one of: destroy, down, '
+ 'in, out, purge, rm, got: {}'.format(invalid_state))
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'ids': fake_id,
+ 'state': 'rm',
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id]
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_failure(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'ids': fake_id,
+ 'state': 'rm'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'Error EBUSY: osd.{} is still up; must be down before removal.'.format(fake_id)
+ rc = 16
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'rm', fake_id]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm'])
+ def test_set_state(self, m_run_command, m_exit_json, state):
+ ca_test_common.set_module_args({
+ 'ids': fake_id,
+ 'state': state
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'marked {} osd.{}'.format(state, fake_id)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+ cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id]
+ if state in ['destroy', 'purge']:
+ cmd.append('--yes-i-really-mean-it')
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == cmd
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm'])
+ def test_set_state_multiple_ids(self, m_run_command, m_exit_json, state):
+ ca_test_common.set_module_args({
+ 'ids': fake_ids,
+ 'state': state
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stderr = ''
+ stdout = ''
+ for osd in fake_ids:
+ stderr += 'marked {} osd.{} '.format(state, osd)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+ cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state]
+ cmd.extend(fake_ids)
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == cmd
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
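+    # destroy and purge accept a single OSD id; passing a list must fail.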
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ @pytest.mark.parametrize('state', ['destroy', 'purge'])
+ def test_invalid_state_multiple_ids(self, m_run_command, m_fail_json, state):
+ ca_test_common.set_module_args({
+ 'ids': fake_ids,
+ 'state': state
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'destroy and purge only support one OSD at at time'
+ assert result['rc'] == 1
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ @pytest.mark.parametrize('state', ['down', 'in', 'out'])
+ def test_already_set_state(self, m_run_command, m_exit_json, state):
+ ca_test_common.set_module_args({
+ 'ids': fake_id,
+ 'state': state
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'osd.{} is already {}.'.format(fake_id, state)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+ cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state, fake_id]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == cmd
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ @pytest.mark.parametrize('state', ['down', 'in', 'out', 'rm'])
+ def test_one_already_set_state_multiple_ids(self, m_run_command, m_exit_json, state):
+ ca_test_common.set_module_args({
+ 'ids': fake_ids,
+ 'state': state
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'marked {} osd.{}. osd.{} does not exist. osd.{} does not exist.'.format(state, fake_ids[0], fake_ids[1], fake_ids[2])
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+ cmd = ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', state]
+ cmd.extend(fake_ids)
+ if state in ['destroy', 'purge']:
+ cmd.append('--yes-i-really-mean-it')
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == cmd
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ @pytest.mark.parametrize('state', ['destroy', 'down', 'in', 'out', 'purge', 'rm'])
+ def test_set_state_with_container(self, m_run_command, m_exit_json, state):
+ ca_test_common.set_module_args({
+ 'ids': fake_id,
+ 'state': state
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'marked {} osd.{}'.format(state, fake_id)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+ cmd = [fake_container_binary, 'run', '--rm', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph', fake_container_image,
+ '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', state, fake_id]
+ if state in ['destroy', 'purge']:
+ cmd.append('--yes-i-really-mean-it')
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == cmd
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
--- /dev/null
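+# Unit tests for the ceph_osd_flag module (osd set/unset cluster flags).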
+from mock.mock import patch
+import os
+import pytest
+import ca_test_common
+import ceph_osd_flag
+
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_flag = 'noup'
+fake_user = 'client.admin'
+fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
+invalid_flag = 'nofoo'
+
+
+class TestCephOSDFlagModule(object):
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_without_parameters(self, m_fail_json):
+ ca_test_common.set_module_args({})
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_osd_flag.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'missing required arguments: name'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_with_invalid_flag(self, m_fail_json):
+ ca_test_common.set_module_args({
+ 'name': invalid_flag,
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_osd_flag.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == ('value of name must be one of: noup, nodown, '
+ 'noout, nobackfill, norebalance, norecover, '
+ 'noscrub, nodeep-scrub, got: {}'.format(invalid_flag))
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_flag,
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd_flag.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_failure(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_flag
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'Error EINVAL: invalid command'
+ rc = 22
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd_flag.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_set_flag(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_flag,
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = '{} is set'.format(fake_flag)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd_flag.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_unset_flag(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_flag,
+ 'state': 'absent'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = '{} is unset'.format(fake_flag)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd_flag.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'unset', fake_flag]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_container(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_flag,
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = '{} is set'.format(fake_flag)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_osd_flag.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph', fake_container_image,
+ '-n', fake_user, '-k', fake_keyring,
+ '--cluster', fake_cluster, 'osd', 'set', fake_flag]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
--- /dev/null
+import os
+import sys
+from mock.mock import patch
+
+sys.path.append('./library')
+import ceph_pool  # noqa: E402
+fake_user = 'client.admin'
+fake_user_key = '/etc/ceph/ceph.client.admin.keyring'
+fake_pool_name = 'foo'
+fake_cluster_name = 'ceph'
+fake_container_image_name = 'quay.ceph.io/ceph-ci/daemon:latest-luminous'
+
+
+@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'podman'})
+class TestCephPoolModule(object):
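+    """Tests for command generation and config comparison in the ceph_pool module."""
+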
+ def setup_method(self):
+ self.fake_running_pool_details = {
+ 'pool_id': 39,
+ 'pool_name': 'foo2',
+ 'create_time': '2020-05-12T12:32:03.696673+0000',
+ 'flags': 32769,
+ 'flags_names': 'hashpspool,creating',
+ 'type': 1,
+ 'size': 2,
+ 'min_size': 1,
+ 'crush_rule': 0,
+ 'object_hash': 2,
+ 'pg_autoscale_mode': 'on',
+ 'pg_num': 32,
+ 'pg_placement_num': 32,
+ 'pg_placement_num_target': 32,
+ 'pg_num_target': 32,
+ 'pg_num_pending': 32,
+ 'last_pg_merge_meta': {
+ 'source_pgid': '0.0',
+ 'ready_epoch': 0,
+ 'last_epoch_started': 0,
+ 'last_epoch_clean': 0,
+ 'source_version': "0'0",
+ 'target_version': "0'0"
+ },
+ 'last_change': '109',
+ 'last_force_op_resend': '0',
+ 'last_force_op_resend_prenautilus': '0',
+ 'last_force_op_resend_preluminous': '0',
+ 'auid': 0,
+ 'snap_mode': 'selfmanaged',
+ 'snap_seq': 0,
+ 'snap_epoch': 0,
+ 'pool_snaps': [],
+ 'removed_snaps': '[]',
+ 'quota_max_bytes': 0,
+ 'quota_max_objects': 0,
+ 'tiers': [],
+ 'tier_of': -1,
+ 'read_tier': -1,
+ 'write_tier': -1,
+ 'cache_mode': 'none',
+ 'target_max_bytes': 0,
+ 'target_max_objects': 0,
+ 'cache_target_dirty_ratio_micro': 400000,
+ 'cache_target_dirty_high_ratio_micro': 600000,
+ 'cache_target_full_ratio_micro': 800000,
+ 'cache_min_flush_age': 0,
+ 'cache_min_evict_age': 0,
+ 'erasure_code_profile': '',
+ 'hit_set_params': {
+ 'type': 'none'
+ },
+ 'hit_set_period': 0,
+ 'hit_set_count': 0,
+ 'use_gmt_hitset': True,
+ 'min_read_recency_for_promote': 0,
+ 'min_write_recency_for_promote': 0,
+ 'hit_set_grade_decay_rate': 0,
+ 'hit_set_search_last_n': 0,
+ 'grade_table': [],
+ 'stripe_width': 0,
+ 'expected_num_objects': 0,
+ 'fast_read': False,
+ 'options': {},
+            # 'target_size_ratio' is normally a key of the 'options' dict above;
+            # it is kept at the top level here on purpose,
+            # see the comment in get_pool_details() for more details
+ 'target_size_ratio': 0.3,
+ 'application_metadata': {
+ 'rbd': {}
+ },
+ 'application': 'rbd'
+ }
+ self.fake_user_pool_config = {
+ 'pool_name': {
+ 'value': 'foo2'
+ },
+ 'pg_num': {
+ 'value': '32',
+ 'cli_set_opt': 'pg_num'
+ },
+ 'pgp_num': {
+ 'value': '0',
+ 'cli_set_opt': 'pgp_num'
+ },
+ 'pg_autoscale_mode': {
+ 'value': 'on',
+ 'cli_set_opt': 'pg_autoscale_mode'
+ },
+ 'target_size_ratio': {
+ 'value': '0.3',
+ 'cli_set_opt': 'target_size_ratio'
+ },
+ 'application': {
+ 'value': 'rbd'
+ },
+ 'type': {
+ 'value': 'replicated'
+ },
+ 'erasure_profile': {
+ 'value': 'default'
+ },
+ 'crush_rule': {
+ 'value': 'replicated_rule',
+ 'cli_set_opt': 'crush_rule'
+ },
+ 'expected_num_objects': {
+ 'value': '0'
+ },
+ 'size': {
+ 'value': '2',
+ 'cli_set_opt': 'size'
+ },
+ 'min_size': {
+ 'value': '0',
+ 'cli_set_opt': 'min_size'
+ },
+ 'pg_placement_num': {
+ 'value': '32',
+ 'cli_set_opt': 'pgp_num'
+ }}
+
+ def test_check_pool_exist(self):
+ expected_command_list = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ fake_user,
+ '-k',
+ fake_user_key,
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'stats',
+ self.fake_user_pool_config['pool_name']['value'],
+ '-f',
+ 'json'
+ ]
+
+ cmd = ceph_pool.check_pool_exist(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ fake_user, fake_user_key, output_format='json',
+ container_image=fake_container_image_name)
+ assert cmd == expected_command_list
+
+ def test_get_default_running_config(self):
+ params = ['osd_pool_default_size',
+ 'osd_pool_default_min_size',
+ 'osd_pool_default_pg_num',
+ 'osd_pool_default_pgp_num']
+
+ expected_command_list = []
+ cmd_list = []
+
+ for param in params:
+ expected_command_list.append([
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'config',
+ 'get',
+ 'mon.*',
+ param
+ ])
+ cmd_list.append(ceph_pool.generate_get_config_cmd(param,
+ fake_cluster_name,
+ fake_user, fake_user_key,
+ container_image=fake_container_image_name))
+ assert cmd_list == expected_command_list
+
+ def test_get_application_pool(self):
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'application',
+ 'get',
+ self.fake_user_pool_config['pool_name']['value'],
+ '-f',
+ 'json'
+ ]
+
+ cmd = ceph_pool.get_application_pool(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ fake_user, fake_user_key, 'json',
+ container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_enable_application_pool(self):
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'application',
+ 'enable',
+ self.fake_user_pool_config['pool_name']['value'],
+ 'rbd'
+ ]
+
+ cmd = ceph_pool.enable_application_pool(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ 'rbd', fake_user, fake_user_key,
+ container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_disable_application_pool(self):
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'application',
+ 'disable',
+ self.fake_user_pool_config['pool_name']['value'],
+ 'rbd',
+ '--yes-i-really-mean-it'
+ ]
+
+ cmd = ceph_pool.disable_application_pool(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ 'rbd', fake_user, fake_user_key,
+ container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_compare_pool_config_no_diff(self):
+ delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details)
+
+ assert delta == {}
+
+ def test_compare_pool_config_std_diff(self):
+ self.fake_user_pool_config['size']['value'] = '3'
+ delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details)
+
+ assert delta == {'size': {'cli_set_opt': 'size', 'value': '3'}}
+
+ def test_compare_pool_config_target_size_ratio_diff(self):
+ self.fake_user_pool_config['target_size_ratio']['value'] = '0.5'
+ delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details)
+
+ assert delta == {'target_size_ratio': {'cli_set_opt': 'target_size_ratio', 'value': '0.5'}}
+
+ def test_compare_pool_config_application_diff(self):
+ self.fake_user_pool_config['application']['value'] = 'foo'
+ delta = ceph_pool.compare_pool_config(self.fake_user_pool_config, self.fake_running_pool_details)
+
+ assert delta == {'application': {'new_application': 'foo', 'old_application': 'rbd', 'value': 'foo'}}
+
+ def test_list_pools_details(self):
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'ls',
+ 'detail',
+ '-f',
+ 'json'
+ ]
+
+ cmd = ceph_pool.list_pools(fake_cluster_name, fake_user, fake_user_key, True, 'json', container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_list_pools_nodetails(self):
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'ls',
+ '-f',
+ 'json'
+ ]
+
+ cmd = ceph_pool.list_pools(fake_cluster_name, fake_user, fake_user_key, False, 'json', container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_create_replicated_pool_pg_autoscaler_enabled(self):
+ self.fake_user_pool_config['type']['value'] = 'replicated'
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'create',
+ self.fake_user_pool_config['pool_name']['value'],
+ self.fake_user_pool_config['type']['value'],
+ '--target_size_ratio',
+ self.fake_user_pool_config['target_size_ratio']['value'],
+ self.fake_user_pool_config['crush_rule']['value'],
+ '--expected_num_objects',
+ self.fake_user_pool_config['expected_num_objects']['value'],
+ '--autoscale-mode',
+ self.fake_user_pool_config['pg_autoscale_mode']['value'],
+ '--size',
+ self.fake_user_pool_config['size']['value']
+ ]
+
+ cmd = ceph_pool.create_pool(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ fake_user, fake_user_key, self.fake_user_pool_config,
+ container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_create_replicated_pool_pg_autoscaler_disabled(self):
+ self.fake_user_pool_config['type']['value'] = 'replicated'
+ self.fake_user_pool_config['pg_autoscale_mode']['value'] = 'off'
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'create',
+ self.fake_user_pool_config['pool_name']['value'],
+ self.fake_user_pool_config['type']['value'],
+ '--pg_num',
+ self.fake_user_pool_config['pg_num']['value'],
+ '--pgp_num',
+ self.fake_user_pool_config['pgp_num']['value'],
+ self.fake_user_pool_config['crush_rule']['value'],
+ '--expected_num_objects',
+ self.fake_user_pool_config['expected_num_objects']['value'],
+ '--autoscale-mode',
+ self.fake_user_pool_config['pg_autoscale_mode']['value'],
+ '--size',
+ self.fake_user_pool_config['size']['value']
+ ]
+
+ cmd = ceph_pool.create_pool(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ fake_user, fake_user_key,
+ self.fake_user_pool_config,
+ container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_create_erasure_pool_pg_autoscaler_enabled(self):
+ self.fake_user_pool_config['type']['value'] = 'erasure'
+ self.fake_user_pool_config['erasure_profile']['value'] = 'erasure-default'
+ self.fake_user_pool_config['crush_rule']['value'] = 'erasure_rule'
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'create',
+ self.fake_user_pool_config['pool_name']['value'],
+ self.fake_user_pool_config['type']['value'],
+ '--target_size_ratio',
+ self.fake_user_pool_config['target_size_ratio']['value'],
+ self.fake_user_pool_config['erasure_profile']['value'],
+ self.fake_user_pool_config['crush_rule']['value'],
+ '--expected_num_objects',
+ self.fake_user_pool_config['expected_num_objects']['value'],
+ '--autoscale-mode',
+ self.fake_user_pool_config['pg_autoscale_mode']['value']
+ ]
+
+ cmd = ceph_pool.create_pool(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ fake_user, fake_user_key, self.fake_user_pool_config,
+ container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_create_erasure_pool_pg_autoscaler_disabled(self):
+ self.fake_user_pool_config['type']['value'] = 'erasure'
+ self.fake_user_pool_config['erasure_profile']['value'] = 'erasure-default'
+ self.fake_user_pool_config['crush_rule']['value'] = 'erasure_rule'
+ self.fake_user_pool_config['pg_autoscale_mode']['value'] = 'off'
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'create',
+ self.fake_user_pool_config['pool_name']['value'],
+ self.fake_user_pool_config['type']['value'],
+ '--pg_num',
+ self.fake_user_pool_config['pg_num']['value'],
+ '--pgp_num',
+ self.fake_user_pool_config['pgp_num']['value'],
+ self.fake_user_pool_config['erasure_profile']['value'],
+ self.fake_user_pool_config['crush_rule']['value'],
+ '--expected_num_objects',
+ self.fake_user_pool_config['expected_num_objects']['value'],
+ '--autoscale-mode',
+ self.fake_user_pool_config['pg_autoscale_mode']['value']
+ ]
+
+ cmd = ceph_pool.create_pool(fake_cluster_name,
+ self.fake_user_pool_config['pool_name']['value'],
+ fake_user, fake_user_key, self.fake_user_pool_config,
+ container_image=fake_container_image_name)
+
+ assert cmd == expected_command
+
+ def test_remove_pool(self):
+ expected_command = [
+ 'podman',
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v',
+ '/etc/ceph:/etc/ceph:z',
+ '-v',
+ '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v',
+ '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=ceph',
+ fake_container_image_name,
+ '-n',
+ 'client.admin',
+ '-k',
+ '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd',
+ 'pool',
+ 'rm',
+ self.fake_user_pool_config['pool_name']['value'],
+ self.fake_user_pool_config['pool_name']['value'],
+ '--yes-i-really-really-mean-it'
+ ]
+
+ cmd = ceph_pool.remove_pool(fake_cluster_name, self.fake_user_pool_config['pool_name']['value'],
+ fake_user, fake_user_key, container_image=fake_container_image_name)
+
+ assert cmd == expected_command
--- /dev/null
+import sys
+import mock
+import os
+import pytest
+sys.path.append('./library')
+import ceph_volume # noqa: E402
+
+
+# Python 3
+try:
+ from unittest.mock import MagicMock
+except ImportError:
+ # Python 2
+ try:
+ from mock import MagicMock
+ except ImportError:
+        print('You need the mock library installed on Python 2.x to run these tests')
+
+
+def get_mounts(mounts=None):
+ volumes = {}
+ volumes['/run/lock/lvm'] = '/run/lock/lvm:z'
+ volumes['/var/run/udev'] = '/var/run/udev:z'
+ volumes['/dev'] = '/dev'
+ volumes['/etc/ceph'] = '/etc/ceph:z'
+ volumes['/run/lvm'] = '/run/lvm'
+ volumes['/var/lib/ceph'] = '/var/lib/ceph:z'
+ volumes['/var/log/ceph'] = '/var/log/ceph:z'
+ if mounts is not None:
+ volumes.update(mounts)
+
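+    # flatten [['-v', 'src:dst'], ...] into a single ['-v', 'src:dst', '-v', 'src:dst', ...] list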
+ return sum([['-v', '{}:{}'.format(src_dir, dst_dir)] for src_dir, dst_dir in volumes.items()], [])
+
+
+def get_container_cmd(mounts=None):
+
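+    # base 'docker run' command shared by the containerized ceph-volume test cases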
+ return ['docker', 'run', '--rm', '--privileged',
+ '--net=host', '--ipc=host'] + \
+ get_mounts(mounts) + ['--entrypoint=ceph-volume']
+
+
+@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'})
+class TestCephVolumeModule(object):
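+    """Tests for command generation in the ceph_volume module (lvm prepare/create/batch, zap, list, inventory)."""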
+
+ def test_data_no_vg(self):
+ result = ceph_volume.get_data("/dev/sda", None)
+ assert result == "/dev/sda"
+
+ def test_data_with_vg(self):
+ result = ceph_volume.get_data("data-lv", "data-vg")
+ assert result == "data-vg/data-lv"
+
+ def test_journal_no_vg(self):
+ result = ceph_volume.get_journal("/dev/sda1", None)
+ assert result == "/dev/sda1"
+
+ def test_journal_with_vg(self):
+ result = ceph_volume.get_journal("journal-lv", "journal-vg")
+ assert result == "journal-vg/journal-lv"
+
+ def test_db_no_vg(self):
+ result = ceph_volume.get_db("/dev/sda1", None)
+ assert result == "/dev/sda1"
+
+ def test_db_with_vg(self):
+ result = ceph_volume.get_db("db-lv", "db-vg")
+ assert result == "db-vg/db-lv"
+
+ def test_wal_no_vg(self):
+ result = ceph_volume.get_wal("/dev/sda1", None)
+ assert result == "/dev/sda1"
+
+ def test_wal_with_vg(self):
+ result = ceph_volume.get_wal("wal-lv", "wal-vg")
+ assert result == "wal-vg/wal-lv"
+
+ def test_container_exec(self):
+ fake_binary = "ceph-volume"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+ expected_command_list = get_container_cmd() + [fake_container_image]
+ result = ceph_volume.container_exec(fake_binary, fake_container_image)
+ assert result == expected_command_list
+
+ def test_zap_osd_container(self):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda'}
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+ expected_command_list = get_container_cmd() + \
+ [fake_container_image,
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'zap',
+ '--destroy',
+ '/dev/sda']
+ result = ceph_volume.zap_devices(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_zap_osd(self):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda'}
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'zap',
+ '--destroy',
+ '/dev/sda']
+ result = ceph_volume.zap_devices(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_zap_osd_fsid(self):
+ fake_module = MagicMock()
+ fake_module.params = {'osd_fsid': 'a_uuid'}
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'zap',
+ '--destroy',
+ '--osd-fsid',
+ 'a_uuid']
+ result = ceph_volume.zap_devices(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_zap_osd_id(self):
+ fake_module = MagicMock()
+ fake_module.params = {'osd_id': '123'}
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'zap',
+ '--destroy',
+ '--osd-id',
+ '123']
+ result = ceph_volume.zap_devices(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_activate_osd(self):
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'activate',
+ '--all']
+ result = ceph_volume.activate_osd()
+ assert result == expected_command_list
+
+ def test_list_osd(self):
+ fake_module = MagicMock()
+ fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'list',
+ '/dev/sda',
+ '--format=json']
+ result = ceph_volume.list_osd(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_list_osd_container(self):
+ fake_module = MagicMock()
+ fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+ expected_command_list = get_container_cmd(
+ {
+ '/var/lib/ceph': '/var/lib/ceph:ro'
+ }) + \
+ [fake_container_image,
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'list',
+ '/dev/sda',
+ '--format=json']
+ result = ceph_volume.list_osd(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_list_storage_inventory(self):
+ fake_module = MagicMock()
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'inventory',
+ '--format=json',
+ ]
+ result = ceph_volume.list_storage_inventory(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_list_storage_inventory_container(self):
+ fake_module = MagicMock()
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+ expected_command_list = get_container_cmd() + \
+ [fake_container_image,
+ '--cluster',
+ 'ceph',
+ 'inventory',
+ '--format=json']
+ result = ceph_volume.list_storage_inventory(fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_create_osd_container(self, objectstore):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda',
+ 'objectstore': objectstore,
+ 'cluster': 'ceph', }
+
+ fake_action = "create"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+ expected_command_list = get_container_cmd() + \
+ [fake_container_image,
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'create',
+ '--%s' % objectstore,
+ '--data',
+ '/dev/sda']
+ result = ceph_volume.prepare_or_create_osd(
+ fake_module, fake_action, fake_container_image)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_create_osd(self, objectstore):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda',
+ 'objectstore': objectstore,
+ 'cluster': 'ceph', }
+
+ fake_container_image = None
+ fake_action = "create"
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'create',
+ '--%s' % objectstore,
+ '--data',
+ '/dev/sda']
+ result = ceph_volume.prepare_or_create_osd(
+ fake_module, fake_action, fake_container_image)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_prepare_osd_container(self, objectstore):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda',
+ 'objectstore': objectstore,
+ 'cluster': 'ceph', }
+
+ fake_action = "prepare"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+ expected_command_list = get_container_cmd() + \
+ [fake_container_image,
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'prepare',
+ '--%s' % objectstore,
+ '--data',
+ '/dev/sda']
+ result = ceph_volume.prepare_or_create_osd(
+ fake_module, fake_action, fake_container_image)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_prepare_osd(self, objectstore):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda',
+ 'objectstore': objectstore,
+ 'cluster': 'ceph', }
+
+ fake_container_image = None
+ fake_action = "prepare"
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'prepare',
+ '--%s' % objectstore,
+ '--data',
+ '/dev/sda']
+ result = ceph_volume.prepare_or_create_osd(
+ fake_module, fake_action, fake_container_image)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_batch_osd_container(self, objectstore):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda',
+ 'objectstore': objectstore,
+ 'block_db_size': '4096',
+ 'journal_size': '4096',
+ 'cluster': 'ceph',
+ 'batch_devices': ["/dev/sda", "/dev/sdb"]}
+
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
+ expected_command_list = get_container_cmd() + \
+ [fake_container_image,
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'batch',
+ '--%s' % objectstore,
+ '--yes',
+ '--prepare',
+ '--journal-size' if objectstore == 'filestore' else '--block-db-size', # noqa E501
+ '4096',
+ '/dev/sda',
+ '/dev/sdb']
+ result = ceph_volume.batch(
+ fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_batch_osd(self, objectstore):
+ fake_module = MagicMock()
+ fake_module.params = {'data': '/dev/sda',
+ 'objectstore': objectstore,
+ 'block_db_size': '4096',
+ 'journal_size': '4096',
+ 'cluster': 'ceph',
+ 'batch_devices': ["/dev/sda", "/dev/sdb"]}
+
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'batch',
+ '--%s' % objectstore,
+ '--yes',
+ '--journal-size' if objectstore == 'filestore' else '--block-db-size', # noqa E501
+ '4096',
+ '/dev/sda',
+ '/dev/sdb']
+ result = ceph_volume.batch(
+ fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_batch_filestore_with_dedicated_journal(self):
+ fake_module = MagicMock()
+ fake_module.params = {'objectstore': 'filestore',
+ 'journal_size': '100',
+ 'cluster': 'ceph',
+ 'batch_devices': ["/dev/sda", "/dev/sdb"],
+ 'journal_devices': ["/dev/sdc", "/dev/sdd"]}
+
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'batch',
+ '--filestore',
+ '--yes',
+ '--journal-size',
+ '100',
+ '/dev/sda',
+ '/dev/sdb',
+ '--journal-devices',
+ '/dev/sdc',
+ '/dev/sdd']
+ result = ceph_volume.batch(
+ fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_batch_bluestore_with_dedicated_db(self):
+ fake_module = MagicMock()
+ fake_module.params = {'objectstore': 'bluestore',
+ 'block_db_size': '-1',
+ 'cluster': 'ceph',
+ 'batch_devices': ["/dev/sda", "/dev/sdb"],
+ 'block_db_devices': ["/dev/sdc", "/dev/sdd"]}
+
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'batch',
+ '--bluestore',
+ '--yes',
+ '/dev/sda',
+ '/dev/sdb',
+ '--db-devices',
+ '/dev/sdc',
+ '/dev/sdd']
+ result = ceph_volume.batch(
+ fake_module, fake_container_image)
+ assert result == expected_command_list
+
+ def test_batch_bluestore_with_dedicated_wal(self):
+ fake_module = MagicMock()
+ fake_module.params = {'objectstore': 'bluestore',
+ 'cluster': 'ceph',
+ 'block_db_size': '-1',
+ 'batch_devices': ["/dev/sda", "/dev/sdb"],
+ 'wal_devices': ["/dev/sdc", "/dev/sdd"]}
+
+ fake_container_image = None
+ expected_command_list = ['ceph-volume',
+ '--cluster',
+ 'ceph',
+ 'lvm',
+ 'batch',
+ '--bluestore',
+ '--yes',
+ '/dev/sda',
+ '/dev/sdb',
+ '--wal-devices',
+ '/dev/sdc',
+ '/dev/sdd']
+ result = ceph_volume.batch(
+ fake_module, fake_container_image)
+ assert result == expected_command_list
--- /dev/null
+from mock.mock import patch
+import os
+import pytest
+import ca_test_common
+import ceph_volume_simple_activate
+
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_id = '42'
+fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
+fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
+
+
+class TestCephVolumeSimpleActivateModule(object):
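+    """Tests for the ceph_volume_simple_activate module ('ceph-volume simple activate')."""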
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'osd_id': fake_id,
+ 'osd_fsid': fake_uuid,
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_activate.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_failure(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'osd_id': fake_id,
+ 'osd_fsid': fake_uuid
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'error'
+ rc = 2
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_activate.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_activate_all_osds(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'osd_all': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_activate.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.object(os.path, 'exists', return_value=True)
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
+ ca_test_common.set_module_args({
+ 'path': fake_path
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_activate.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.object(os.path, 'exists', return_value=False)
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_activate_path_not_exists(self, m_fail_json, m_os_path):
+ ca_test_common.set_module_args({
+ 'path': fake_path
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_volume_simple_activate.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == '{} does not exist'.format(fake_path)
+ assert result['rc'] == 1
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_activate_without_systemd(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'osd_id': fake_id,
+ 'osd_fsid': fake_uuid,
+ 'systemd': False
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_activate.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_activate_with_container(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'osd_id': fake_id,
+ 'osd_fsid': fake_uuid,
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_activate.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == [fake_container_binary,
+ 'run', '--rm', '--privileged',
+ '--ipc=host', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '-v', '/run/lvm/:/run/lvm/',
+ '-v', '/run/lock/lvm/:/run/lock/lvm/',
+ '--entrypoint=ceph-volume', fake_container_image,
+ '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
--- /dev/null
+from mock.mock import patch
+import os
+import pytest
+import ca_test_common
+import ceph_volume_simple_scan
+
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
+fake_path = '/var/lib/ceph/osd/ceph-0'
+
+
+class TestCephVolumeSimpleScanModule(object):
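+    """Tests for the ceph_volume_simple_scan module ('ceph-volume simple scan')."""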
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_scan.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan']
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_failure(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'error'
+ rc = 2
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_scan.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_scan_all_osds(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_scan.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.object(os.path, 'exists', return_value=True)
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_scan_path_exists(self, m_run_command, m_exit_json, m_os_path):
+ ca_test_common.set_module_args({
+ 'path': fake_path
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_scan.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', fake_path]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.object(os.path, 'exists', return_value=False)
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_scan_path_not_exists(self, m_fail_json, m_os_path):
+ ca_test_common.set_module_args({
+ 'path': fake_path
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ ceph_volume_simple_scan.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == '{} does not exist'.format(fake_path)
+ assert result['rc'] == 1
+
+ @patch.object(os.path, 'exists', return_value=True)
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_scan_path_stdout_force(self, m_run_command, m_exit_json, m_os_path):
+ ca_test_common.set_module_args({
+ 'path': fake_path,
+ 'force': True,
+ 'stdout': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_scan.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'scan', '--force', '--stdout', fake_path]
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_scan_with_container(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ ceph_volume_simple_scan.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == [fake_container_binary,
+ 'run', '--rm', '--privileged',
+ '--ipc=host', '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '-v', '/run/lvm/:/run/lvm/',
+ '-v', '/run/lock/lvm/:/run/lock/lvm/',
+ '--entrypoint=ceph-volume', fake_container_image,
+ '--cluster', fake_cluster, 'simple', 'scan']
+ assert result['rc'] == rc
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
--- /dev/null
+from mock.mock import patch
+import pytest
+import ca_test_common
+import cephadm_adopt
+
+fake_cluster = 'ceph'
+fake_image = 'quay.ceph.io/ceph/daemon-base:latest'
+fake_name = 'mon.foo01'
+
+
+class TestCephadmAdoptModule(object):
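+    """Tests for the cephadm_adopt module, which adopts legacy daemons via 'cephadm adopt'."""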
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_without_parameters(self, m_fail_json):
+ ca_test_common.set_module_args({})
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'missing required arguments: name'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['cephadm', 'ls', '--no-detail']
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_failure(self, m_run_command, m_fail_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name
+ })
+ m_fail_json.side_effect = ca_test_common.fail_json
+ stdout = ''
+ stderr = 'ERROR: cephadm should be run as root'
+ rc = 1
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert result['rc'] == 1
+ assert result['msg'] == 'ERROR: cephadm should be run as root'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_default_values(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = 'Stopping old systemd unit ceph-mon@{}...\n' \
+ 'Disabling old systemd unit ceph-mon@{}...\n' \
+ 'Moving data...\n' \
+ 'Chowning content...\n' \
+ 'Moving logs...\n' \
+ 'Creating new units...\n' \
+ 'firewalld ready'.format(fake_name, fake_name)
+ stderr = ''
+ rc = 0
+ m_run_command.side_effect = [
+ (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''),
+ (rc, stdout, stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy']
+ assert result['rc'] == 0
+ assert result['stderr'] == stderr
+ assert result['stdout'] == stdout
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_already_adopted(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stderr = ''
+ stdout = '[{{"style":"cephadm:v1","name":"{}"}}]'.format(fake_name)
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['cephadm', 'ls', '--no-detail']
+ assert result['rc'] == 0
+ assert result['stderr'] == stderr
+ assert result['stdout'] == '{} is already adopted'.format(fake_name)
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_docker(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'docker': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.side_effect = [
+ (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''),
+ (rc, stdout, stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', '--docker', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_custom_image(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'image': fake_image
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.side_effect = [
+ (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''),
+ (rc, stdout, stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', '--image', fake_image, 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_without_pull(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'pull': False
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.side_effect = [
+ (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''),
+ (rc, stdout, stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy', '--skip-pull']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_without_firewalld(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'name': fake_name,
+ 'firewalld': False
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.side_effect = [
+ (0, '[{{"style":"legacy","name":"{}"}}]'.format(fake_name), ''),
+ (rc, stdout, stderr)
+ ]
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_adopt.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'adopt', '--cluster', fake_cluster, '--name', fake_name, '--style', 'legacy', '--skip-firewalld']
+ assert result['rc'] == 0
--- /dev/null
+from mock.mock import patch
+import pytest
+import ca_test_common
+import cephadm_bootstrap
+
+fake_fsid = '0f1e0605-db0b-485c-b366-bd8abaa83f3b'
+fake_image = 'quay.ceph.io/ceph/daemon-base:latest-master-devel'
+fake_ip = '192.168.42.1'
+fake_registry = 'quay.ceph.io'
+fake_registry_user = 'foo'
+fake_registry_pass = 'bar'
+fake_registry_json = 'registry.json'
+
+
+class TestCephadmBootstrapModule(object):
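+    """Tests for the cephadm_bootstrap module, which wraps 'cephadm bootstrap'."""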
+
+ @patch('ansible.module_utils.basic.AnsibleModule.fail_json')
+ def test_without_parameters(self, m_fail_json):
+ ca_test_common.set_module_args({})
+ m_fail_json.side_effect = ca_test_common.fail_json
+
+ with pytest.raises(ca_test_common.AnsibleFailJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['msg'] == 'missing required arguments: mon_ip'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ def test_with_check_mode(self, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ '_ansible_check_mode': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert not result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip]
+ assert result['rc'] == 0
+ assert not result['stdout']
+ assert not result['stderr']
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_failure(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = 'ERROR: cephadm should be run as root'
+ rc = 1
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip]
+ assert result['rc'] == 1
+ assert result['stderr'] == 'ERROR: cephadm should be run as root'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_default_values(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = 'Bootstrap complete.'
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip]
+ assert result['rc'] == 0
+ assert result['stdout'] == 'Bootstrap complete.'
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_docker(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'docker': True
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', '--docker', 'bootstrap', '--mon-ip', fake_ip]
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_custom_image(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'image': fake_image
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', '--image', fake_image, 'bootstrap', '--mon-ip', fake_ip]
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_custom_fsid(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'fsid': fake_fsid
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--fsid', fake_fsid]
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_without_pull(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'pull': False
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-pull']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_dashboard_user_password(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'dashboard': True,
+ 'dashboard_user': 'foo',
+ 'dashboard_password': 'bar'
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--initial-dashboard-user', 'foo', '--initial-dashboard-password', 'bar']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_without_dashboard(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'dashboard': False
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-dashboard']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_without_monitoring(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'monitoring': False
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-monitoring-stack']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_without_firewalld(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'firewalld': False
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip, '--skip-firewalld']
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_registry_credentials(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'registry_url': fake_registry,
+ 'registry_username': fake_registry_user,
+ 'registry_password': fake_registry_pass
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip,
+ '--registry-url', fake_registry,
+ '--registry-username', fake_registry_user,
+ '--registry-password', fake_registry_pass]
+ assert result['rc'] == 0
+
+ @patch('ansible.module_utils.basic.AnsibleModule.exit_json')
+ @patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ def test_with_registry_json_file(self, m_run_command, m_exit_json):
+ ca_test_common.set_module_args({
+ 'mon_ip': fake_ip,
+ 'registry_json': fake_registry_json
+ })
+ m_exit_json.side_effect = ca_test_common.exit_json
+ stdout = ''
+ stderr = ''
+ rc = 0
+ m_run_command.return_value = rc, stdout, stderr
+
+ with pytest.raises(ca_test_common.AnsibleExitJson) as result:
+ cephadm_bootstrap.main()
+
+ result = result.value.args[0]
+ assert result['changed']
+ assert result['cmd'] == ['cephadm', 'bootstrap', '--mon-ip', fake_ip,
+ '--registry-json', fake_registry_json]
+ assert result['rc'] == 0
--- /dev/null
+import os
+import sys
+from mock.mock import patch, MagicMock
+import pytest
+
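+# make the custom Ansible modules under ./library importable for these unit tests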
+sys.path.append("./library")
+import radosgw_caps # noqa: E402
+
+
+fake_binary = "radosgw-admin"
+fake_cluster = "ceph"
+fake_container_binary = "podman"
+fake_container_image = "docker.io/ceph/daemon:latest"
+fake_container_cmd = [
+ fake_container_binary,
+ "run",
+ "--rm",
+ "--net=host",
+ "-v",
+ "/etc/ceph:/etc/ceph:z",
+ "-v",
+ "/var/lib/ceph/:/var/lib/ceph/:z",
+ "-v",
+ "/var/log/ceph/:/var/log/ceph/:z",
+ "--entrypoint=" + fake_binary,
+ fake_container_image,
+]
+fake_user = "foo"
+fake_caps = ["users=write", "zone=*", "metadata=read,write"]
+fake_params = {
+ "cluster": fake_cluster,
+ "name": fake_user,
+ "caps": fake_caps,
+}
+
+
+class TestRadosgwCapsModule(object):
+ @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary})
+ def test_container_exec(self):
+ cmd = radosgw_caps.container_exec(fake_binary, fake_container_image)
+ assert cmd == fake_container_cmd
+
+ def test_not_is_containerized(self):
+ assert radosgw_caps.is_containerized() is None
+
+ @patch.dict(os.environ, {"CEPH_CONTAINER_IMAGE": fake_container_image})
+ def test_is_containerized(self):
+ assert radosgw_caps.is_containerized() == fake_container_image
+
+ @pytest.mark.parametrize("image", [None, fake_container_image])
+ @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary})
+ def test_pre_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ assert radosgw_caps.pre_generate_radosgw_cmd(image) == expected_cmd
+
+ @pytest.mark.parametrize("image", [None, fake_container_image])
+ @patch.dict(os.environ, {"CEPH_CONTAINER_BINARY": fake_container_binary})
+ def test_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ expected_cmd.extend(["--cluster", fake_cluster, "caps"])
+ assert (
+ radosgw_caps.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd
+ )
+
+ def test_add_caps(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ "--cluster",
+ fake_cluster,
+ "caps",
+ "add",
+ "--uid=" + fake_user,
+ "--caps=" + ";".join(fake_caps),
+ ]
+
+ assert radosgw_caps.add_caps(fake_module) == expected_cmd
+
+ def test_remove_caps(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ "--cluster",
+ fake_cluster,
+ "caps",
+ "rm",
+ "--uid=" + fake_user,
+ "--caps=" + ";".join(fake_caps),
+ ]
+
+ assert radosgw_caps.remove_caps(fake_module) == expected_cmd
--- /dev/null
+import os
+import sys
+from mock.mock import patch, MagicMock
+import pytest
+sys.path.append('./library')
+import radosgw_realm # noqa: E402
+
+
+fake_binary = 'radosgw-admin'
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + fake_binary,
+ fake_container_image
+]
+fake_realm = 'foo'
+fake_params = {'cluster': fake_cluster,
+ 'name': fake_realm,
+ 'default': True}
+
+
+class TestRadosgwRealmModule(object):
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_container_exec(self):
+ cmd = radosgw_realm.container_exec(fake_binary, fake_container_image)
+ assert cmd == fake_container_cmd
+
+ def test_not_is_containerized(self):
+ assert radosgw_realm.is_containerized() is None
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_is_containerized(self):
+ assert radosgw_realm.is_containerized() == fake_container_image
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_pre_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ assert radosgw_realm.pre_generate_radosgw_cmd(image) == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ expected_cmd.extend([
+ '--cluster',
+ fake_cluster,
+ 'realm'
+ ])
+ assert radosgw_realm.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd
+
+ def test_create_realm(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'realm', 'create',
+ '--rgw-realm=' + fake_realm,
+ '--default'
+ ]
+
+ assert radosgw_realm.create_realm(fake_module) == expected_cmd
+
+ def test_get_realm(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'realm', 'get',
+ '--rgw-realm=' + fake_realm,
+ '--format=json'
+ ]
+
+ assert radosgw_realm.get_realm(fake_module) == expected_cmd
+
+ def test_remove_realm(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'realm', 'delete',
+ '--rgw-realm=' + fake_realm
+ ]
+
+ assert radosgw_realm.remove_realm(fake_module) == expected_cmd
--- /dev/null
+import os
+import sys
+from mock.mock import patch, MagicMock
+import pytest
+sys.path.append('./library')
+import radosgw_user # noqa: E402
+
+
+fake_binary = 'radosgw-admin'
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + fake_binary,
+ fake_container_image
+]
+fake_user = 'foo'
+fake_realm = 'canada'
+fake_zonegroup = 'quebec'
+fake_zone = 'montreal'
+fake_params = {'cluster': fake_cluster,
+ 'name': fake_user,
+ 'display_name': fake_user,
+ 'email': fake_user,
+ 'access_key': 'PC7NPg87QWhOzXTkXIhX',
+ 'secret_key': 'jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz',
+ 'realm': fake_realm,
+ 'zonegroup': fake_zonegroup,
+ 'zone': fake_zone,
+ 'system': True,
+ 'admin': True}
+
+
+class TestRadosgwUserModule(object):
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_container_exec(self):
+ cmd = radosgw_user.container_exec(fake_binary, fake_container_image)
+ assert cmd == fake_container_cmd
+
+ def test_not_is_containerized(self):
+ assert radosgw_user.is_containerized() is None
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_is_containerized(self):
+ assert radosgw_user.is_containerized() == fake_container_image
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_pre_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ assert radosgw_user.pre_generate_radosgw_cmd(image) == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ expected_cmd.extend([
+ '--cluster',
+ fake_cluster,
+ 'user'
+ ])
+ assert radosgw_user.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd
+
+ def test_create_user(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'user', 'create',
+ '--uid=' + fake_user,
+ '--display_name=' + fake_user,
+ '--email=' + fake_user,
+ '--access-key=PC7NPg87QWhOzXTkXIhX',
+ '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone,
+ '--system',
+ '--admin'
+ ]
+
+ assert radosgw_user.create_user(fake_module) == expected_cmd
+
+ def test_modify_user(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'user', 'modify',
+ '--uid=' + fake_user,
+ '--display_name=' + fake_user,
+ '--email=' + fake_user,
+ '--access-key=PC7NPg87QWhOzXTkXIhX',
+ '--secret-key=jV64v39lVTjEx1ZJN6ocopnhvwMp1mXCD4kzBiPz',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone,
+ '--system',
+ '--admin'
+ ]
+
+ assert radosgw_user.modify_user(fake_module) == expected_cmd
+
+ def test_get_user(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'user', 'info',
+ '--uid=' + fake_user,
+ '--format=json',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone
+ ]
+
+ assert radosgw_user.get_user(fake_module) == expected_cmd
+
+ def test_remove_user(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'user', 'rm',
+ '--uid=' + fake_user,
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone
+ ]
+
+ assert radosgw_user.remove_user(fake_module) == expected_cmd
--- /dev/null
+import os
+import sys
+from mock.mock import patch, MagicMock
+import pytest
+sys.path.append('./library')
+import radosgw_zone # noqa: E402
+
+
+fake_binary = 'radosgw-admin'
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + fake_binary,
+ fake_container_image
+]
+fake_realm = 'foo'
+fake_zonegroup = 'bar'
+fake_zone = 'z1'
+fake_endpoints = ['http://192.168.1.10:8080', 'http://192.168.1.11:8080']
+fake_params = {'cluster': fake_cluster,
+ 'name': fake_zone,
+ 'realm': fake_realm,
+ 'zonegroup': fake_zonegroup,
+ 'endpoints': fake_endpoints,
+ 'default': True,
+ 'master': True}
+
+
+class TestRadosgwZoneModule(object):
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_container_exec(self):
+ cmd = radosgw_zone.container_exec(fake_binary, fake_container_image)
+ assert cmd == fake_container_cmd
+
+ def test_not_is_containerized(self):
+ assert radosgw_zone.is_containerized() is None
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_is_containerized(self):
+ assert radosgw_zone.is_containerized() == fake_container_image
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_pre_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ assert radosgw_zone.pre_generate_radosgw_cmd(image) == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ expected_cmd.extend([
+ '--cluster',
+ fake_cluster,
+ 'zone'
+ ])
+ assert radosgw_zone.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd
+
+ def test_create_zone(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zone', 'create',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone,
+ '--endpoints=' + ','.join(fake_endpoints),
+ '--default',
+ '--master'
+ ]
+
+ assert radosgw_zone.create_zone(fake_module) == expected_cmd
+
+ def test_modify_zone(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zone', 'modify',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone,
+ '--endpoints=' + ','.join(fake_endpoints),
+ '--default',
+ '--master'
+ ]
+
+ assert radosgw_zone.modify_zone(fake_module) == expected_cmd
+
+ def test_get_zone(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zone', 'get',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone,
+ '--format=json'
+ ]
+
+ assert radosgw_zone.get_zone(fake_module) == expected_cmd
+
+ def test_get_zonegroup(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zonegroup', 'get',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--format=json'
+ ]
+
+ assert radosgw_zone.get_zonegroup(fake_module) == expected_cmd
+
+ def test_get_realm(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'realm', 'get',
+ '--rgw-realm=' + fake_realm,
+ '--format=json'
+ ]
+
+ assert radosgw_zone.get_realm(fake_module) == expected_cmd
+
+ def test_remove_zone(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zone', 'delete',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--rgw-zone=' + fake_zone
+ ]
+
+ assert radosgw_zone.remove_zone(fake_module) == expected_cmd
--- /dev/null
+import os
+import sys
+from mock.mock import patch, MagicMock
+import pytest
+sys.path.append('./library')
+import radosgw_zonegroup # noqa: E402
+
+
+fake_binary = 'radosgw-admin'
+fake_cluster = 'ceph'
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + fake_binary,
+ fake_container_image
+]
+fake_realm = 'foo'
+fake_zonegroup = 'bar'
+fake_endpoints = ['http://192.168.1.10:8080', 'http://192.168.1.11:8080']
+fake_params = {'cluster': fake_cluster,
+ 'name': fake_zonegroup,
+ 'realm': fake_realm,
+ 'endpoints': fake_endpoints,
+ 'default': True,
+ 'master': True}
+
+
+class TestRadosgwZonegroupModule(object):
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_container_exec(self):
+ cmd = radosgw_zonegroup.container_exec(fake_binary, fake_container_image)
+ assert cmd == fake_container_cmd
+
+ def test_not_is_containerized(self):
+ assert radosgw_zonegroup.is_containerized() is None
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_is_containerized(self):
+ assert radosgw_zonegroup.is_containerized() == fake_container_image
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_pre_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ assert radosgw_zonegroup.pre_generate_radosgw_cmd(image) == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_radosgw_cmd(self, image):
+ if image:
+ expected_cmd = fake_container_cmd
+ else:
+ expected_cmd = [fake_binary]
+
+ expected_cmd.extend([
+ '--cluster',
+ fake_cluster,
+ 'zonegroup'
+ ])
+ assert radosgw_zonegroup.generate_radosgw_cmd(fake_cluster, [], image) == expected_cmd
+
+ def test_create_zonegroup(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zonegroup', 'create',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--endpoints=' + ','.join(fake_endpoints),
+ '--default',
+ '--master'
+ ]
+
+ assert radosgw_zonegroup.create_zonegroup(fake_module) == expected_cmd
+
+ def test_modify_zonegroup(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zonegroup', 'modify',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--endpoints=' + ','.join(fake_endpoints),
+ '--default',
+ '--master'
+ ]
+
+ assert radosgw_zonegroup.modify_zonegroup(fake_module) == expected_cmd
+
+ def test_get_zonegroup(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zonegroup', 'get',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup,
+ '--format=json'
+ ]
+
+ assert radosgw_zonegroup.get_zonegroup(fake_module) == expected_cmd
+
+ def test_get_realm(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'realm', 'get',
+ '--rgw-realm=' + fake_realm,
+ '--format=json'
+ ]
+
+ assert radosgw_zonegroup.get_realm(fake_module) == expected_cmd
+
+ def test_remove_zonegroup(self):
+ fake_module = MagicMock()
+ fake_module.params = fake_params
+ expected_cmd = [
+ fake_binary,
+ '--cluster', fake_cluster,
+ 'zonegroup', 'delete',
+ '--rgw-realm=' + fake_realm,
+ '--rgw-zonegroup=' + fake_zonegroup
+ ]
+
+ assert radosgw_zonegroup.remove_zonegroup(fake_module) == expected_cmd
--- /dev/null
+from mock.mock import patch, MagicMock
+import os
+import ca_common
+import pytest
+
+fake_container_binary = 'podman'
+fake_container_image = 'docker.io/ceph/daemon:latest'
+
+
+class TestCommon(object):
+
+ def setup_method(self):
+ self.fake_binary = 'ceph'
+ self.fake_cluster = 'ceph'
+ self.fake_container_cmd = [
+ fake_container_binary,
+ 'run',
+ '--rm',
+ '--net=host',
+ '-v', '/etc/ceph:/etc/ceph:z',
+ '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
+ '-v', '/var/log/ceph/:/var/log/ceph/:z',
+ '--entrypoint=' + self.fake_binary,
+ fake_container_image
+ ]
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_container_exec(self):
+ cmd = ca_common.container_exec(self.fake_binary, fake_container_image)
+ assert cmd == self.fake_container_cmd
+
+ def test_not_is_containerized(self):
+ assert ca_common.is_containerized() is None
+
+ @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
+ def test_is_containerized(self):
+ assert ca_common.is_containerized() == fake_container_image
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_pre_generate_ceph_cmd(self, image):
+ if image:
+ expected_cmd = self.fake_container_cmd
+ else:
+ expected_cmd = [self.fake_binary]
+
+ assert ca_common.pre_generate_ceph_cmd(image) == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_ceph_cmd(self, image):
+ sub_cmd = ['osd', 'pool']
+ args = ['create', 'foo']
+ if image:
+ expected_cmd = self.fake_container_cmd
+ else:
+ expected_cmd = [self.fake_binary]
+
+ expected_cmd.extend([
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/ceph.client.admin.keyring',
+ '--cluster',
+ self.fake_cluster,
+ 'osd', 'pool',
+ 'create', 'foo'
+ ])
+ assert ca_common.generate_ceph_cmd(sub_cmd, args, cluster=self.fake_cluster, container_image=image) == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_ceph_cmd_different_cluster_name(self, image):
+ sub_cmd = ['osd', 'pool']
+ args = ['create', 'foo']
+ if image:
+ expected_cmd = self.fake_container_cmd
+ else:
+ expected_cmd = [self.fake_binary]
+
+ expected_cmd.extend([
+ '-n', 'client.admin',
+ '-k', '/etc/ceph/foo.client.admin.keyring',
+ '--cluster',
+ 'foo',
+ 'osd', 'pool',
+ 'create', 'foo'
+ ])
+ result = ca_common.generate_ceph_cmd(sub_cmd, args, cluster='foo', container_image=image)
+ assert result == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_ceph_cmd_different_cluster_name_and_user(self, image):
+ sub_cmd = ['osd', 'pool']
+ args = ['create', 'foo']
+ if image:
+ expected_cmd = self.fake_container_cmd
+ else:
+ expected_cmd = [self.fake_binary]
+
+ expected_cmd.extend([
+ '-n', 'client.foo',
+ '-k', '/etc/ceph/foo.client.foo.keyring',
+ '--cluster',
+ 'foo',
+ 'osd', 'pool',
+ 'create', 'foo'
+ ])
+ result = ca_common.generate_ceph_cmd(sub_cmd, args, cluster='foo', user='client.foo', container_image=image)
+ assert result == expected_cmd
+
+ @pytest.mark.parametrize('image', [None, fake_container_image])
+ @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
+ def test_generate_ceph_cmd_different_user(self, image):
+ sub_cmd = ['osd', 'pool']
+ args = ['create', 'foo']
+ if image:
+ expected_cmd = self.fake_container_cmd
+ else:
+ expected_cmd = [self.fake_binary]
+
+ expected_cmd.extend([
+ '-n', 'client.foo',
+ '-k', '/etc/ceph/ceph.client.foo.keyring',
+ '--cluster',
+ 'ceph',
+ 'osd', 'pool',
+ 'create', 'foo'
+ ])
+ result = ca_common.generate_ceph_cmd(sub_cmd, args, user='client.foo', container_image=image)
+ assert result == expected_cmd
+
+ @pytest.mark.parametrize('stdin', [None, 'foo'])
+ def test_exec_command(self, stdin):
+ fake_module = MagicMock()
+ rc = 0
+ stderr = ''
+ stdout = 'ceph version 1.2.3'
+ fake_module.run_command.return_value = 0, stdout, stderr
+ expected_cmd = [self.fake_binary, '--version']
+ _rc, _cmd, _out, _err = ca_common.exec_command(fake_module, expected_cmd, stdin=stdin)
+ assert _rc == rc
+ assert _cmd == expected_cmd
+ assert _err == stderr
+ assert _out == stdout
--- /dev/null
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleFilterError
+
+import ipaddrs_in_ranges
+import pytest
+
+pytest.importorskip('netaddr')
+
+filter_plugin = ipaddrs_in_ranges.FilterModule()
+
+
+class TestIpaddrsInRanges(object):
+
+ def test_one_ip_one_range(self):
+ ips = ['10.10.10.1']
+ ranges = ['10.10.10.1/24']
+ result = filter_plugin.ips_in_ranges(ips, ranges)
+ assert ips[0] in result
+ assert len(result) == 1
+
+ def test_two_ip_one_range(self):
+ ips = ['192.168.1.1', '10.10.10.1']
+ ranges = ['10.10.10.1/24']
+ result = filter_plugin.ips_in_ranges(ips, ranges)
+ assert ips[0] not in result
+ assert ips[1] in result
+ assert len(result) == 1
+
+ def test_one_ip_two_ranges(self):
+ ips = ['10.10.10.1']
+ ranges = ['192.168.1.0/24', '10.10.10.1/24']
+ result = filter_plugin.ips_in_ranges(ips, ranges)
+ assert ips[0] in result
+ assert len(result) == 1
+
+ def test_multiple_ips_multiple_ranges(self):
+ ips = ['10.10.10.1', '192.168.1.1', '172.16.10.1']
+ ranges = ['192.168.1.0/24', '10.10.10.1/24', '172.16.17.0/24']
+ result = filter_plugin.ips_in_ranges(ips, ranges)
+ assert ips[0] in result
+ assert ips[1] in result
+ assert ips[2] not in result
+ assert len(result) == 2
+
+ def test_no_ips_in_ranges(self):
+ ips = ['10.10.20.1', '192.168.2.1', '172.16.10.1']
+ ranges = ['192.168.1.0/24', '10.10.10.1/24', '172.16.17.0/24']
+ result = filter_plugin.ips_in_ranges(ips, ranges)
+ assert len(result) == 0
+
+ def test_ips_in_ranges_in_filters_dict(self):
+ assert 'ips_in_ranges' in filter_plugin.filters()
+
+ def test_missing_netaddr_module(self):
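+ # simulate netaddr not being installed by clearing the module-level reference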
+ ipaddrs_in_ranges.netaddr = None
+
+ with pytest.raises(AnsibleFilterError) as result:
+ filter_plugin.filters()
+
+ assert result.type == AnsibleFilterError
+ assert str(result.value) == "The ips_in_ranges filter requires python's netaddr be installed on the ansible controller."
--- /dev/null
+# this is just a placeholder so that we can define what the 'root' of the tests
+# dir really is.
+[pytest]
+markers =
+ ceph_crash: environment with ceph crash enabled
+ dashboard: environment with dashboard enabled
+ no_docker: environment without containers
+ docker: environment with containers
+ all: for all nodes
+ iscsigws: for iscsigw nodes
+ mdss: for mds nodes
+ mgrs: for mgr nodes
+ mons: for mon nodes
+ nfss: for nfs nodes
+ osds: for osd nodes
+ rbdmirrors: for rbdmirror nodes
+ rgws: for rgw nodes
+ grafanas: for grafana nodes
--- /dev/null
+# These are Python requirements needed to run the functional tests
+testinfra
+pytest-xdist
+pytest
+ansible>=2.9,<2.10,!=2.9.10
+Jinja2>=2.10
+netaddr
+mock
+jmespath
+pytest-rerunfailures
+pytest-cov
+six
--- /dev/null
+#!/bin/bash
+# Generate a custom ssh config from Vagrant so that it can then be used by
+# ansible.cfg
+
+path=$1
+
+if [ $# -eq 0 ]
+ then
+ echo "A path to the scenario is required as an argument and it wasn't provided"
+ exit 1
+fi
+
+cd "$path"
+vagrant ssh-config > vagrant_ssh_config
--- /dev/null
+#!/bin/bash
+
+vagrant box remove --force --provider libvirt --box-version 0 centos/stream8 || true
+vagrant box add --provider libvirt --name centos/stream8 https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-Vagrant-8-20220125.1.x86_64.vagrant-libvirt.box || true
+
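+# retry 'vagrant up' up to 5 times, capping each attempt at 10 minutes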
+retries=0
+until [ $retries -ge 5 ]
+do
+ echo "Attempting to start VMs. Attempts: $retries"
+ timeout 10m time vagrant up "$@" && break
+ retries=$((retries+1))
+ sleep 5
+done
+
+sleep 10
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+
+set -ex
+
+function git_diff_to_head {
+ git diff --diff-filter=MT --no-color origin/"${GITHUB_BASE_REF}"..HEAD
+}
+
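+# print the paths of files changed in this PR whose path matches the given pattern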
+function match_file {
+ git_diff_to_head | sed -n "s|^+++.*\\($1.*\\)|\\1|p"
+}
+
+# group_vars / defaults
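+# every change under a role's defaults/main.yml must be regenerated into the group_vars sample files,
+# so the number of modified defaults files must not exceed the number of modified group_vars files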
+match_file "/defaults/main.yml"
+nb=$(match_file "/defaults/main.yml" | wc -l)
+if [[ "$nb" -eq 0 ]]; then
+ echo "group_vars has not been touched."
+else
+ match_file "group_vars/"
+ nb_group_vars=$(match_file "group_vars/" | wc -l)
+ if [[ "$nb" -gt "$nb_group_vars" ]]; then
+ echo "One or more files containing default variables has/have been modified."
+ echo "You must run 'generate_group_vars_sample.sh' to generate the group_vars template files."
+ exit 1
+ fi
+fi
+
+# ceph_release_num[ceph_release] statements check
+if match_file "roles/ceph-defaults/" | grep -E '^[<>+].*- ceph_release_num\[ceph_release\]'; then
+ echo "Do not use statements like '- ceph_release_num[ceph_release]' in ceph-defaults role!"
+ echo "'ceph_release' is only populated **after** the play of ceph-defaults, typically in ceph-common or ceph-docker-common."
+ exit 1
+fi
+echo "No '- ceph_release_num[ceph_release]' statements found in ceph-defaults role!"
--- /dev/null
+#!/bin/bash
+set -x
+
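+# every commit between the PR base branch and HEAD must carry a Signed-off-by trailer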
+if [[ "$(git log --oneline --no-merges origin/"${GITHUB_BASE_REF}"..HEAD | wc -l)" -ne "$(git log --no-merges origin/"${GITHUB_BASE_REF}"..HEAD | grep -c Signed-off-by)" ]]; then
+ echo "One or more commits is/are missing a Signed-off-by. Add it with 'git commit -s'."
+ exit 1
+else
+ echo "Sign-off ok!"
+fi
\ No newline at end of file
--- /dev/null
+[tox]
+envlist = centos-container-cephadm
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ pip
+ rm
+passenv=*
+sitepackages=True
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_KEEP_REMOTE_FILES = 1
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+ # Set the vagrant box image to use
+ CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir= {toxinidir}/tests/functional/cephadm
+
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/cephadm.yml --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ vagrant destroy -f
--- /dev/null
+[tox]
+envlist = centos-container-docker_to_podman
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ pip
+ rm
+passenv=*
+sitepackages=True
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_KEEP_REMOTE_FILES = 1
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+ # Set the vagrant box image to use
+ CEPH_ANSIBLE_VAGRANT_BOX = centos/7
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir= {toxinidir}/tests/functional/docker2podman
+
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ # configure lvm
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
+
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/docker-to-podman.yml --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ "
+
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ vagrant destroy -f
--- /dev/null
+[tox]
+envlist = centos-{container,non_container}-external_clients
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ git
+ pip
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+# non_container: DEV_SETUP = True
+ # Set the vagrant box image to use
+ centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+
+ container: CONTAINER_DIR = /container
+ container: PLAYBOOK = site-container.yml.sample
+ non_container: PLAYBOOK = site.yml.sample
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir={toxinidir}/tests/functional/external_clients{env:CONTAINER_DIR:}
+commands=
+ ansible-galaxy install -r {toxinidir}/requirements.yml -v
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml
+
+ # configure lvm
+ ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
+
+ ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/external_clients_admin_key.yml
+
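+ # deploy the clients, consuming the previously deployed cluster as an external Ceph cluster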
+ ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ireallymeanit=yes \
+ fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
+ external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
+ generate_fsid=false \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"
+
+ ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
+ ireallymeanit=yes \
+ fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
+ external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
+ generate_fsid=false \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"
+
+ vagrant destroy --force
--- /dev/null
+[tox]
+envlist = centos-{container,non_container}-filestore_to_bluestore
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ git
+ pip
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+# non_container: DEV_SETUP = True
+ # Set the vagrant box image to use
+ centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+
+ # Set the ansible inventory host file to be used according to which distrib we are running on
+ INVENTORY = {env:_INVENTORY:hosts}
+ container: CONTAINER_DIR = /container
+ container: PLAYBOOK = site-container.yml.sample
+ non_container: PLAYBOOK = site.yml.sample
+ non_container: DEV_SETUP = True
+
+ CEPH_DOCKER_IMAGE_TAG = latest-pacific
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir={toxinidir}/tests/functional/filestore-to-bluestore{env:CONTAINER_DIR:}
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd0:osd1'
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osd3:osd4' --tags partitions
+
+ # deploy the cluster
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/filestore-to-bluestore.yml --limit osds --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ "
+
+ bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+
+ vagrant destroy --force
--- /dev/null
+[tox]
+envlist = centos-container-podman
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ pip
+ rm
+passenv=*
+sitepackages=True
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_KEEP_REMOTE_FILES = 1
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+ # Set the vagrant box image to use
+ CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+
+ # Set the ansible inventory host file to be used according to which distrib we are running on
+ INVENTORY = {env:_INVENTORY:hosts}
+ PLAYBOOK = site-container.yml.sample
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir= {toxinidir}/tests/functional/podman
+
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ # configure lvm
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ # test cluster state using ceph-ansible tests
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ # reboot all vms
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+
+ # retest to ensure cluster came back up correctly after rebooting
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ vagrant destroy -f
--- /dev/null
+[tox]
+envlist = centos-{container,non_container}-rbdmirror
+
+skipsdist = True
+
+[testenv]
+allowlist_externals =
+ vagrant
+ bash
+ git
+ pip
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_KEEP_REMOTE_FILES = 1
+ ANSIBLE_STDOUT_CALLBACK = yaml
+# non_container: DEV_SETUP = True
+ # Set the vagrant box image to use
+ centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+
+ INVENTORY = {env:_INVENTORY:hosts}
+ container: CONTAINER_DIR = /container
+ container: PLAYBOOK = site-container.yml.sample
+ non_container: PLAYBOOK = site.yml.sample
+ container: CEPH_RBD_MIRROR_REMOTE_MON_HOSTS = 192.168.144.10
+ non_container: CEPH_RBD_MIRROR_REMOTE_MON_HOSTS = 192.168.140.10
+
+ UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-main
+ UPDATE_CEPH_DEV_BRANCH = main
+ UPDATE_CEPH_DEV_SHA1 = latest
+ ROLLING_UPDATE = True
+deps= -r{toxinidir}/tests/requirements.txt
+changedir={toxinidir}/tests/functional/rbdmirror{env:CONTAINER_DIR:}
+commands=
+ ansible-galaxy install -r {toxinidir}/requirements.yml -v
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+
+ # configure lvm
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ceph_rbd_mirror_configure=true \
+ ceph_rbd_mirror_pool=rbd \
+ ceph_rbd_mirror_local_user_secret=AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== \
+ yes_i_know=true \
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
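+ # bring up the secondary cluster and configure it to mirror the 'rbd' pool from the primary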
+ bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}"
+ bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml
+ # ensure the rule isn't already present
+ ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
+ ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=present'
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ yes_i_know=true \
+ ceph_rbd_mirror_configure=true \
+ ceph_rbd_mirror_pool=rbd \
+ ceph_rbd_mirror_remote_user=client.rbd-mirror-peer \
+ ceph_rbd_mirror_remote_mon_hosts={env:CEPH_RBD_MIRROR_REMOTE_MON_HOSTS} \
+ ceph_rbd_mirror_remote_key=AQC+eM1iKKBXFBAAVpunJvqpkodHSYmljCFCnw== \
+ ceph_rbd_mirror_remote_cluster=remote \
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rbdmirror.yml --skip-tags=secondary --extra-vars "\
+ ceph_rbd_mirror_pool=rbd \
+ "
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rbdmirror.yml --skip-tags=primary -e 'ceph_rbd_mirror_pool=rbd'
+ vagrant destroy --force
+ bash -c "cd {changedir}/secondary && vagrant destroy --force"
+ # clean rule after the scenario is complete
+ ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
--- /dev/null
+[tox]
+envlist = {centos}-{container,non_container}-{shrink_osd_single,shrink_osd_multiple}
+skipsdist = True
+
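+# per-scenario command blocks, referenced from the [testenv] commands below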
+[shrink-osd-single]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=0 \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=1 \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=2 \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=3 \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=4 \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=5 \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=6 \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=7 \
+ "
+
+[shrink-osd-multiple]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill=0,1,2,3,4,5,6,7 \
+ "
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+passenv=*
+sitepackages=False
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_KEEP_REMOTE_FILES = 1
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+ non_container: DEV_SETUP = True
+ # Set the vagrant box image to use
+ centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ INVENTORY = {env:_INVENTORY:hosts}
+ container: CONTAINER_DIR = /container
+ container: PLAYBOOK = site-container.yml.sample
+ container: PURGE_PLAYBOOK = purge-container-cluster.yml
+ non_container: PLAYBOOK = site.yml.sample
+
+ CEPH_DOCKER_IMAGE_TAG = latest-pacific
+ CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-pacific
+ UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-pacific
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir=
+ shrink_osd_single: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}
+ shrink_osd_multiple: {toxinidir}/tests/functional/shrink_osd{env:CONTAINER_DIR:}
+
+
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ # configure lvm
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ yes_i_know=true \
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ # test cluster state using ceph-ansible tests
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ shrink_osd_single: {[shrink-osd-single]commands}
+ shrink_osd_multiple: {[shrink-osd-multiple]commands}
+
+ # configure lvm
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit osds --extra-vars "\
+ yes_i_know=true \
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ # retest to ensure OSDs are well redeployed
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ vagrant destroy --force
\ No newline at end of file
--- /dev/null
+[tox]
+envlist = centos-{container,non_container}-subset_update
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ git
+ pip
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+# non_container: DEV_SETUP = True
+ # Set the vagrant box image to use
+ centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+
+ INVENTORY = {env:_INVENTORY:hosts}
+ container: CONTAINER_DIR = /container
+ container: PLAYBOOK = site-container.yml.sample
+ non_container: PLAYBOOK = site.yml.sample
+
+ UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-pacific
+ ROLLING_UPDATE = True
+changedir={toxinidir}/tests/functional/subset_update{env:CONTAINER_DIR:}
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+# use the stable-5.0 branch to deploy an octopus cluster
+ git clone -b stable-5.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
+
+ # deploy the cluster
+ bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry=quay.ceph.io \
+ ceph_docker_image=ceph-ci/daemon \
+ ceph_docker_image_tag=latest-octopus \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "'
+
+ pip uninstall -y ansible
+ pip install -r {toxinidir}/tests/requirements.txt
+ ansible-galaxy install -r {toxinidir}/requirements.yml -v
+# upgrade mons
+# mon1
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit mon1 --tags=mons --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+# mon0 and mon2
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit 'mons:!mon1' --tags=mons --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+# upgrade mgrs
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+# upgrade osd1
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+# upgrade remaining osds (serially)
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+# upgrade rgws
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+# post upgrade actions
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+
+ bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+
+ vagrant destroy --force
--- /dev/null
+[tox]
+envlist = centos-{container,non_container}-update
+
+skipsdist = True
+
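+# Illustrative invocation (not part of the original config; substitute the
+# actual ini file name): tox -c <this-file> -e centos-container-update
+# Anything after "--" is forwarded as posargs to vagrant_up.sh, e.g.
+# "-- --provider=libvirt" to override the default virtualbox provider.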
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ git
+ pip
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+# non_container: DEV_SETUP = True
+ # Set the vagrant box image to use
+ centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+
+ INVENTORY = {env:_INVENTORY:hosts}
+ container: CONTAINER_DIR = /container
+ container: PLAYBOOK = site-container.yml.sample
+ non_container: PLAYBOOK = site.yml.sample
+
+ CEPH_DOCKER_IMAGE_TAG = latest-octopus
+ UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-pacific
+ ROLLING_UPDATE = True
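+# note: the "centos-*:", "container:" and "non_container:" prefixes above are
+# tox factor conditions; each such line only applies to environments whose
+# name contains the given factor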
+changedir={toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+# use the stable-5.0 branch to deploy an octopus cluster
+ git clone -b stable-5.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml'
+
+ # configure lvm; osd2 is excluded because that node uses the lvm batch scenario (see the corresponding inventory host file)
+ bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"'
+
+ # deploy the cluster
+ # pass ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key to work around a quirk in the CI:
+ # when rendering the ganesha.conf.j2 template, Ansible complains about undefined variables inside the "{% if nfs_obj_gw | bool %}" block even though nfs_obj_gw is explicitly set to false (see below).
+ bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ nfs_file_gw=True \
+ nfs_obj_gw=False \
+ ceph_nfs_rgw_access_key=fake_access_key \
+ ceph_nfs_rgw_secret_key=fake_secret_key \
+ "'
+ pip uninstall -y ansible
+ pip install -r {toxinidir}/tests/requirements.txt
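+ # switch back to the requirements of the branch under test before upgrading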
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ nfs_file_gw=True \
+ nfs_obj_gw=False \
+ "
+
+ bash -c "CEPH_STABLE_RELEASE=pacific py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+
+ vagrant destroy --force
--- /dev/null
+[tox]
+envlist = centos-{container,non_container}-{all_daemons,collocation,lvm_osds,shrink_mon,shrink_mgr,shrink_mds,shrink_rbdmirror,shrink_rgw,lvm_batch,add_mons,add_mgrs,add_mdss,add_rbdmirrors,add_rgws,rgw_multisite,purge,storage_inventory,lvm_auto_discovery,all_in_one,cephadm_adopt,purge_dashboard}
+ centos-non_container-{switch_to_containers}
+ infra_lv_create
+ migrate_ceph_disk_to_ceph_volume
+
+skipsdist = True
+
+# a test scenario for the lv-create.yml and lv-teardown playbooks
+[testenv:infra_lv_create]
+whitelist_externals =
+ vagrant
+ bash
+ mkdir
+ cat
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_ACTION_PLUGINS = {toxinidir}/plugins/actions
+ ANSIBLE_CALLBACK_PLUGINS = {toxinidir}/plugins/callback
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+deps= -r{toxinidir}/tests/requirements.txt
+changedir={toxinidir}/tests/functional/infra_lv_create
+commands=
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-create.yml
+
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-teardown.yml --extra-vars "ireallymeanit=yes"
+
+ cat {toxinidir}/infrastructure-playbooks/lv-create.log
+
+ vagrant destroy --force
+
+# extra commands for the purge scenarios: purge the cluster, then set it up
+# again to make sure a purge leaves the nodes clean enough that they can be
+# redeployed to.
+[purge]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
+ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
+ "
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+ ireallymeanit=yes \
+ remove_packages=yes \
+ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
+ "
+
+ # set up lvm again; osd2 is excluded because that node uses the lvm batch scenario (see the corresponding inventory host file)
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
+
+ # set up the cluster again
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ # test that the cluster can be redeployed in a healthy state
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[purge-dashboard]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/purge-dashboard.yml --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} \
+ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} \
+ ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
+ "
+
+ # set up the cluster again
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ # test that the cluster can be redeployed in a healthy state
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[purge-lvm]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+ ireallymeanit=yes \
+ remove_packages=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+
+ # set up the cluster again
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample}
+ # test that the cluster can be redeployed in a healthy state
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[shrink-mon]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mon.yml --extra-vars "\
+ ireallymeanit=yes \
+ mon_to_kill={env:MON_TO_KILL:mon2} \
+ "
+[shrink-osd]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
+ ireallymeanit=yes \
+ osd_to_kill={env:OSD_TO_KILL:0} \
+ "
+
+[shrink-mgr]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mgr.yml --extra-vars "\
+ ireallymeanit=yes \
+ mgr_to_kill={env:MGR_TO_KILL:mgr1} \
+ "
+
+[shrink-mds]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-mds.yml --extra-vars "\
+ ireallymeanit=yes \
+ mds_to_kill={env:MDS_TO_KILL:mds0} \
+ "
+
+[shrink-rbdmirror]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rbdmirror.yml --extra-vars "\
+ ireallymeanit=yes \
+ rbdmirror_to_kill={env:RBDMIRROR_TO_KILL:rbd-mirror0} \
+ "
+
+[shrink-rgw]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-rgw.yml --extra-vars "\
+ ireallymeanit=yes \
+ rgw_to_kill={env:RGW_TO_KILL:rgw0.rgw0} \
+ "
+
+[switch-to-containers]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_image_tag=latest-pacific-devel \
+ ceph_docker_registry=quay.ceph.io \
+ ceph_docker_image=ceph-ci/daemon \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[add-mons]
+commands=
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars ireallymeanit=yes
+ py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[add-mgrs]
+commands=
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[add-mdss]
+commands=
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[add-rbdmirrors]
+commands=
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[add-rgws]
+commands=
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ py.test --reruns 5 --reruns-delay 1 -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+[rgw-multisite]
+commands=
+ bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}"
+ bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml
+ # ensure the rule isn't already present
+ ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
+ ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=present'
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ireallymeanit=yes \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit rgws --extra-vars "\
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags download
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags download
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags upload
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --skip-tags upload
+ bash -c "cd {changedir}/secondary && vagrant destroy --force"
+ # clean rule after the scenario is complete
+ ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
+
+[storage-inventory]
+commands=
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\
+ ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-pacific} \
+ "
+
+[cephadm-adopt]
+commands=
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\
+ ireallymeanit=yes \
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ "
+ # idempotency test
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/cephadm-adopt.yml --extra-vars "\
+ ireallymeanit=yes \
+ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+ "
+
+[testenv]
+whitelist_externals =
+ vagrant
+ bash
+ pip
+ rm
+passenv=*
+sitepackages=False
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+ ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+ ANSIBLE_KEEP_REMOTE_FILES = 1
+ ANSIBLE_CACHE_PLUGIN = memory
+ ANSIBLE_GATHERING = implicit
+ # only available for ansible >= 2.5
+ ANSIBLE_STDOUT_CALLBACK = yaml
+ non_container: DEV_SETUP = True
+ # Set the vagrant box image to use
+ centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
+ INVENTORY = {env:_INVENTORY:hosts}
+ container: CONTAINER_DIR = /container
+ container: PLAYBOOK = site-container.yml.sample
+ container: PURGE_PLAYBOOK = purge-container-cluster.yml
+ non_container: PLAYBOOK = site.yml.sample
+ shrink_mds: MDS_TO_KILL = mds0
+ shrink_mgr: MGR_TO_KILL = mgr1
+ shrink_mon: MON_TO_KILL = mon2
+ shrink_rbdmirror: RBDMIRROR_TO_KILL = rbd-mirror0
+ shrink_rgw: RGW_TO_KILL = rgw0.rgw0
+
+ CEPH_DOCKER_IMAGE_TAG = latest-pacific
+ CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-pacific
+
+ switch_to_containers: CEPH_DOCKER_IMAGE_TAG = latest-pacific-devel
+
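+# the shrink_* *_TO_KILL values set in setenv above mirror the fallback
+# defaults used by the corresponding shrink-* command sections further below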
+deps= -r{toxinidir}/tests/requirements.txt
+changedir=
+ all_daemons: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+ cluster: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+ shrink_mon: {toxinidir}/tests/functional/shrink_mon{env:CONTAINER_DIR:}
+ shrink_mgr: {toxinidir}/tests/functional/shrink_mgr{env:CONTAINER_DIR:}
+ shrink_mds: {toxinidir}/tests/functional/shrink_mds{env:CONTAINER_DIR:}
+ shrink_rbdmirror: {toxinidir}/tests/functional/shrink_rbdmirror{env:CONTAINER_DIR:}
+ shrink_rgw: {toxinidir}/tests/functional/shrink_rgw{env:CONTAINER_DIR:}
+ # tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
+ collocation: {toxinidir}/tests/functional/collocation{env:CONTAINER_DIR:}
+ purge: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+ purge_dashboard: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+ switch_to_containers: {toxinidir}/tests/functional/all_daemons
+ lvm_osds: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:}
+ lvm_batch: {toxinidir}/tests/functional/lvm-batch{env:CONTAINER_DIR:}
+ add_mons: {toxinidir}/tests/functional/add-mons{env:CONTAINER_DIR:}
+ add_mgrs: {toxinidir}/tests/functional/add-mgrs{env:CONTAINER_DIR:}
+ add_mdss: {toxinidir}/tests/functional/add-mdss{env:CONTAINER_DIR:}
+ add_rbdmirrors: {toxinidir}/tests/functional/add-rbdmirrors{env:CONTAINER_DIR:}
+ add_rgws: {toxinidir}/tests/functional/add-rgws{env:CONTAINER_DIR:}
+ rgw_multisite: {toxinidir}/tests/functional/rgw-multisite{env:CONTAINER_DIR:}
+ storage_inventory: {toxinidir}/tests/functional/lvm-osds{env:CONTAINER_DIR:}
+ lvm_auto_discovery: {toxinidir}/tests/functional/lvm-auto-discovery{env:CONTAINER_DIR:}
+ all_in_one: {toxinidir}/tests/functional/all-in-one{env:CONTAINER_DIR:}
+ cephadm_adopt: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+
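+# changedir points each scenario at its functional test directory, which is
+# expected to contain the Vagrantfile, inventory and any scenario-specific
+# overrides used by the commands below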
+commands=
+ ansible-galaxy install -r {toxinidir}/requirements.yml -v
+ rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
+
+ bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+ bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+ # configure lvm; osd2 is excluded because that node uses the lvm batch scenario (see the corresponding inventory host file)
+ !lvm_batch-!lvm_auto_discovery: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2'
+ lvm_osds,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit osd2
+
+ rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ no_log_on_ceph_key_tasks=false \
+ deploy_secondary_zones=False \
+ ceph_docker_registry_auth=True \
+ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
+ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
+ "
+
+ # test cluster state using ceph-ansible tests
+ py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ # reboot all vms
+ all_daemons,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+
+ # retest to ensure cluster came back up correctly after rebooting
+ all_daemons,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+ # handlers/idempotency test
+ all_daemons,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-pacific}" --extra-vars @ceph-override.json
+
+ purge: {[purge]commands}
+ purge_dashboard: {[purge-dashboard]commands}
+ switch_to_containers: {[switch-to-containers]commands}
+ shrink_mon: {[shrink-mon]commands}
+ shrink_mgr: {[shrink-mgr]commands}
+ shrink_mds: {[shrink-mds]commands}
+ shrink_rbdmirror: {[shrink-rbdmirror]commands}
+ shrink_rgw: {[shrink-rgw]commands}
+ add_mons: {[add-mons]commands}
+ add_mgrs: {[add-mgrs]commands}
+ add_mdss: {[add-mdss]commands}
+ add_rbdmirrors: {[add-rbdmirrors]commands}
+ add_rgws: {[add-rgws]commands}
+ rgw_multisite: {[rgw-multisite]commands}
+ storage_inventory: {[storage-inventory]commands}
+ cephadm_adopt: {[cephadm-adopt]commands}
+
+ vagrant destroy --force
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.42
+cluster_subnet: 192.168.43
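+# (these are the first three octets only; the Vagrantfile is expected to
+# append a per-VM final octet when assigning addresses)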
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For Xenial use disks: [ '/dev/sdb', '/dev/sdc' ]
+# For CentOS7 use disks: [ '/dev/sda', '/dev/sdb' ]
+disks: [ '/dev/sdb', '/dev/sdc' ]
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are kept under tighter control and
+# will not be updated frequently unless the build systems require it. These
+# are (for now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial or bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# openSUSE: opensuse/openSUSE-42.3-x86_64
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack, and defaults to /vagrant
+vagrant_sync_dir: /home/vagrant/sync
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: fs.file-max, value: 26234859 }
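+# (sysctl-style name/value pairs; presumably applied as kernel tunables by the
+# playbooks during provisioning)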
+
+# Debug mode, runs Ansible with -vvvv
+debug: false