- `./roles/ceph-rbd-mirror/defaults/main.yml`
- `./roles/ceph-defaults/defaults/main.yml`
- `./roles/ceph-osd/defaults/main.yml`
+- `./roles/ceph-nfs/defaults/main.yml`
- `./roles/ceph-client/defaults/main.yml`
- `./roles/ceph-common/defaults/main.yml`
- `./roles/ceph-mon/defaults/main.yml`
NOSDS = settings['osd_vms']
NMDSS = settings['mds_vms']
NRGWS = settings['rgw_vms']
+NNFSS = settings['nfs_vms']
NRBD_MIRRORS = settings['rbd_mirror_vms']
CLIENTS = settings['client_vms']
MGRS = settings['mgr_vms']
'osds' => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
'mdss' => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
'rgws' => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
+ 'nfss' => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
'rbd_mirrors' => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
'clients' => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
'mgrs' => (0..MGRS - 1).map { |j| "#{LABEL_PREFIX}mgr#{j}" },
end
end
+ (0..NNFSS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}nfs#{i}" do |nfs|
+ nfs.vm.hostname = "#{LABEL_PREFIX}nfs#{i}"
+ if ASSIGN_STATIC_IP && !IPV6
+ nfs.vm.network :private_network,
+ :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+ end
+
+ # Virtualbox
+ nfs.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ nfs.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ nfs.vm.provider :libvirt do |lv,override|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ if IPV6 then
+ override.vm.network :private_network,
+ :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+ :libvirt__ipv6_prefix => "64",
+ :libvirt__dhcp_enabled => false,
+ :libvirt__forward_mode => "veryisolated",
+ :libvirt__network_name => "ipv6-public-network",
+ :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+ :netmask => "64"
+ end
+ end
+
+ # Parallels
+ nfs.vm.provider "parallels" do |prl|
+ prl.name = "ceph-nfs#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ nfs.vm.provider :linode do |provider|
+ provider.label = nfs.vm.hostname
+ end
+ end
+ end
+
(0..NMDSS - 1).each do |i|
config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}"
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
mgr_vms: 0
osd_vms: 3
mds_vms: 1
rgw_vms: 0
+nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
gather_facts: false
become: true
osd_vms: 0
mds_vms: 0
rgw_vms: 0
+ nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
mgr_vms: 0
#osd_group_name: osds
#rgw_group_name: rgws
#mds_group_name: mdss
+#nfs_group_name: nfss
#rbdmirror_group_name: rbdmirrors
#client_group_name: clients
#mgr_group_name: mgrs
# - "{{ osd_group_name }}"
# - "{{ rgw_group_name }}"
# - "{{ mds_group_name }}"
+# - "{{ nfs_group_name }}"
# - "{{ rbdmirror_group_name }}"
# - "{{ client_group_name }}"
# - "{{ mgr_group_name }}"
#ceph_osd_firewall_zone: public
#ceph_rgw_firewall_zone: public
#ceph_mds_firewall_zone: public
+#ceph_nfs_firewall_zone: public
#ceph_rbdmirror_firewall_zone: public
#ceph_dashboard_firewall_zone: public
#ceph_rgwloadbalancer_firewall_zone: public
#ceph_stable_release: reef
#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
+#nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+#centos_release_nfs: centos-release-nfs-ganesha4
+#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu
+#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
+#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
+#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu
+
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack
#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
+#nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman.
+# Flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous.
+#nfs_ganesha_flavor: "ceph_main"
+
+
# REPOSITORY: CUSTOM
#
# Enabled when ceph_repository == 'custom'
#handler_health_rgw_check_retries: 5
#handler_health_rgw_check_delay: 10
+# NFS handler checks
+#handler_health_nfs_check_retries: 5
+#handler_health_nfs_check_delay: 10
+
# RBD MIRROR handler checks
#handler_health_rbd_mirror_check_retries: 5
#handler_health_rbd_mirror_check_delay: 10
#ceph_rbd_mirror_pool: "rbd"
+###############
+# NFS-GANESHA #
+###############
+#
+# Access type options
+#
+# Enable NFS File access
+# If set to true, then ganesha is set up to export the root of the
+# Ceph filesystem, and ganesha's attribute and directory caching is disabled
+# as much as possible, since libcephfs clients also cache the same
+# information.
+#
+# Set this to true to enable File access via NFS. Requires an MDS role.
+#nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
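+#
+# A minimal sketch (illustrative values, not the defaults above): a cluster
+# that carries an MDS role and exports CephFS only, with object access off:
+# nfs_file_gw: true
+# nfs_obj_gw: false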
+
+
###################
# CONFIG OVERRIDE #
###################
--- /dev/null
+---
+# Variables here are applicable to all host groups, NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because ansible does not recognize the
+# file as a good configuration file when there is no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though NFS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on NFS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory.
+#copy_admin_key: false
+
+# Whether the docker container or systemd service should be enabled
+# and started. It's useful to set this to false if the nfs-ganesha
+# service is managed by pacemaker.
+#ceph_nfs_enable_service: true
+
+# The ceph-nfs systemd service uses ansible's hostname as an instance id,
+# so the service name is ceph-nfs@{{ ansible_facts['hostname'] }}. This is not
+# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
+# such a case it's better to have a constant instance id instead, which
+# can be set via 'ceph_nfs_service_suffix'.
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
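+#
+# A minimal sketch (the 'nfs-ha' id is a made-up example): a pacemaker-managed
+# pair where every host runs the same ceph-nfs@nfs-ha instance:
+# ceph_nfs_service_suffix: "nfs-ha"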
+
+######################
+# NFS Ganesha Config #
+######################
+#ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
+#ceph_nfs_dynamic_exports: false
+# If set to true, then rados is used to store ganesha exports
+# and client session information. This is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover.
+#ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLs.
+#ceph_nfs_rados_export_index: "ganesha-export-index"
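+#
+# A minimal sketch (illustrative): two nfs-ganesha servers doing
+# active/passive failover, sharing export and session state through RADOS:
+# ceph_nfs_rados_backend: true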
+# Address the ganesha service should listen on; by default ganesha listens on all
+# addresses. (Note: ganesha ignores this parameter in the current version due to
+# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
+# ceph_nfs_bind_addr: 0.0.0.0
+
+# If set to true, then ganesha's attribute and directory caching is disabled
+# as much as possible. Currently, ganesha caches by default.
+# When using ganesha as CephFS's gateway, it is recommended to turn off
+# ganesha's caching as the libcephfs clients also cache the same information.
+# Note: Irrespective of this option's setting, ganesha's caching is disabled
+# when the 'nfs_file_gw' option is set to true.
+#ceph_nfs_disable_caching: false
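+#
+# A minimal sketch (illustrative): explicitly disable ganesha's caches when
+# fronting CephFS without setting 'nfs_file_gw':
+# ceph_nfs_disable_caching: true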
+
+# This is the file ganesha will use to control NFSv4 ID mapping
+#ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf"
+
+# idmap configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# Example:
+# idmap_conf_overrides:
+# General:
+# Domain: foo.domain.net
+#idmap_conf_overrides: {}
+
+####################
+# FSAL Ceph Config #
+####################
+#ceph_nfs_ceph_export_id: 20133
+#ceph_nfs_ceph_pseudo_path: "/cephfile"
+#ceph_nfs_ceph_protocols: "3,4"
+#ceph_nfs_ceph_access_type: "RW"
+#ceph_nfs_ceph_user: "admin"
+#ceph_nfs_ceph_squash: "Root_Squash"
+#ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p"
+
+###################
+# FSAL RGW Config #
+###################
+#ceph_nfs_rgw_export_id: 20134
+#ceph_nfs_rgw_pseudo_path: "/cephobject"
+#ceph_nfs_rgw_protocols: "3,4"
+#ceph_nfs_rgw_access_type: "RW"
+#ceph_nfs_rgw_user: "cephnfs"
+#ceph_nfs_rgw_squash: "Root_Squash"
+#ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
+# Note: keys are optional and can be generated, but not on containerized
+# deployments, where they must be configured.
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and
+# must be in the correct ganesha.conf format seen here:
+# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
+#
+# Example:
+# CACHEINODE {
+# # Entries_HWMark = 100000;
+# }
+#
+# ganesha_core_param_overrides:
+# ganesha_ceph_export_overrides:
+# ganesha_rgw_export_overrides:
+# ganesha_rgw_section_overrides:
+# ganesha_log_overrides:
+# ganesha_conf_overrides: |
+# CACHEINODE {
+# # Entries_HWMark = 100000;
+# }
+
+##########
+# DOCKER #
+##########
+
+#ceph_docker_image: "ceph/daemon"
+#ceph_docker_image_tag: latest
+#ceph_nfs_docker_extra_env:
+#ceph_config_keys: [] # DON'T TOUCH ME
+
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
any_errors_fatal: true
inventory_hostname in groups.get(mds_group_name, []) or
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
- inventory_hostname in groups.get(rbdmirror_group_name, [])
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(nfs_group_name, [])
- name: Configure repository for installing cephadm
when: containerized_deployment | bool
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}"
state: absent
+- name: Stop and remove legacy ceph nfs daemons
+ hosts: "{{ nfs_group_name|default('nfss') }}"
+ tags: 'ceph_nfs_adopt'
+ serial: 1
+ become: true
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
+ name: ceph-defaults
+
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
+ name: ceph-nfs
+ tasks_from: create_rgw_nfs_user.yml
+
+ - name: Enable ceph mgr nfs module
+ ceph_mgr_module:
+ name: "nfs"
+ cluster: "{{ cluster }}"
+ state: enable
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: Stop and disable ceph-nfs systemd service
+ ansible.builtin.service:
+ name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: Reset failed ceph-nfs systemd unit
+ ansible.builtin.command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module
+ changed_when: false
+ failed_when: false
+ when: containerized_deployment | bool
+
+ - name: Remove ceph-nfs systemd unit files
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /etc/systemd/system/ceph-nfs@.service
+ - /etc/systemd/system/ceph-nfs@.service.d
+
+ - name: Remove legacy ceph radosgw directory
+ ansible.builtin.file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
+ state: absent
+
+ - name: Create nfs ganesha cluster
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+ - name: Create cephfs export
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+ when: nfs_file_gw | bool
+
+ - name: Create rgw export
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ environment:
+ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+ when: nfs_obj_gw | bool
+
- name: Redeploy rbd-mirror daemons
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
become: true
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false
- osds
- mdss
- rgws
+ - nfss
- rbdmirrors
- clients
- mgrs
- "{{ osd_group_name | default('osds') }}"
- "{{ mds_group_name | default('mdss') }}"
- "{{ rgw_group_name | default('rgws') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
- "{{ mgr_group_name | default('mgrs') }}"
- "{{ rbdmirror_group_name | default('rbdmirrors') }}"
- "{{ monitoring_group_name | default('monitoring') }}"
inventory_hostname in groups.get(mds_group_name, []) or
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
- inventory_hostname in groups.get(rbdmirror_group_name, [])
+ inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(nfs_group_name, [])
- name: Pulling alertmanager/grafana/prometheus images from docker daemon
ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mgr_group_name, [])
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
+ name: ceph-nfs
+ tasks_from: systemd.yml
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
- name: Import ceph-osd role
ansible.builtin.import_role:
name: ceph-osd
- osds
- mdss
- rgws
+ - nfss
- rbdmirrors
- clients
- mgrs
- mdss
- rgws
- rbdmirrors
+ - nfss
- clients
- mgrs
- monitoring
ansible.builtin.import_role:
name: ceph-defaults
+ - name: Nfs related tasks
+ when: groups[nfs_group_name] | default([]) | length > 0
+ block:
+ - name: Get nfs nodes ansible facts
+ ansible.builtin.setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ with_items: "{{ groups[nfs_group_name] }}"
+ run_once: true
+
+ - name: Get all nfs-ganesha mount points
+ ansible.builtin.command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+ register: nfs_ganesha_mount_points
+ failed_when: false
+ changed_when: false
+ with_items: "{{ groups[nfs_group_name] }}"
+
+ - name: Ensure nfs-ganesha mountpoint(s) are unmounted
+ ansible.posix.mount:
+ path: "{{ item.split(' ')[1] }}"
+ state: unmounted
+ with_items:
+ - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
+ when: item | length > 0
+
- name: Ensure cephfs mountpoint(s) are unmounted
ansible.builtin.command: umount -a -t ceph
changed_when: false
- ceph
- libceph
+
+- name: Purge ceph nfs cluster
+ hosts: nfss
+ gather_facts: false # Already gathered previously
+ become: true
+ tasks:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
+ name: ceph-defaults
+
+ - name: Stop ceph nfss with systemd
+ ansible.builtin.service:
+ name: "{{ 'ceph-nfs@' + ansible_facts['hostname'] if containerized_deployment | bool else 'nfs-ganesha' }}"
+ state: stopped
+ failed_when: false
+
+ - name: Remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/ganesha
+ - /var/lib/nfs/ganesha
+ - /var/run/ganesha
+ - /etc/systemd/system/ceph-nfs@.service
+
+
- name: Purge node-exporter
hosts:
- mons
- mdss
- rgws
- rbdmirrors
+ - nfss
- clients
- mgrs
- monitoring
- mdss
- rgws
- rbdmirrors
+ - nfss
- mgrs
become: true
tasks:
- mdss
- rgws
- rbdmirrors
+ - nfss
- clients
- mgrs
- monitoring
- mdss
- rgws
- rbdmirrors
+ - nfss
- mgrs
- clients
gather_facts: false # Already gathered previously
- "{{ mds_group_name|default('mdss') }}"
- "{{ rgw_group_name|default('rgws') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ client_group_name|default('clients') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ monitoring_group_name | default('monitoring') }}"
- "{{ mds_group_name|default('mdss') }}"
- "{{ rgw_group_name|default('rgws') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ client_group_name|default('clients') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ monitoring_group_name | default('monitoring') }}"
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ client_group_name|default('clients') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
tags: always
name: ceph-rbd-mirror
+- name: Upgrade ceph nfs node
+ vars:
+ upgrade_ceph_packages: true
+ hosts: "{{ nfs_group_name|default('nfss') }}"
+ tags: nfss
+ serial: 1
+ become: true
+ gather_facts: false
+ tasks:
+ # failed_when: false is here so that if we upgrade
+ # from a version of ceph that does not have nfs-ganesha
+ # then this task will not fail
+ - name: Stop ceph nfs
+ ansible.builtin.systemd:
+ name: nfs-ganesha
+ state: stopped
+ enabled: false
+ masked: true
+ failed_when: false
+ when: not containerized_deployment | bool
+
+ - name: Systemd stop nfs container
+ ansible.builtin.systemd:
+ name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
+ state: stopped
+ enabled: false
+ masked: true
+ failed_when: false
+ when:
+ - ceph_nfs_enable_service | bool
+ - containerized_deployment | bool
+
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
+ name: ceph-defaults
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
+ name: ceph-facts
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
+ name: ceph-handler
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
+ name: ceph-common
+ when: not containerized_deployment | bool
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
+ name: ceph-container-common
+ when: containerized_deployment | bool
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
+ name: ceph-config
+
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
+ name: ceph-nfs
+
- name: Upgrade ceph client node
vars:
upgrade_ceph_packages: true
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
tags: monitoring
gather_facts: false
- "{{ mds_group_name|default('mdss') }}"
- "{{ rgw_group_name|default('rgws') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - "{{ nfs_group_name|default('nfss') }}"
become: true
name: ceph-rbd-mirror
+- name: Switching from non-containerized to containerized ceph nfs
+
+ hosts: "{{ nfs_group_name|default('nfss') }}"
+
+ vars:
+ containerized_deployment: true
+ nfs_group_name: nfss
+
+ serial: 1
+ become: true
+ pre_tasks:
+
+ # failed_when: false is here because if we're
+ # working with a jewel cluster then ceph nfs
+ # will not exist
+ - name: Stop non-containerized ceph nfs(s)
+ ansible.builtin.service:
+ name: nfs-ganesha
+ state: stopped
+ enabled: false
+ failed_when: false
+
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
+ name: ceph-defaults
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
+ name: ceph-facts
+
+ # NOTE: changed from file module to raw find command for performance reasons
+ # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+  # as in this case we know we want everything owned by the ceph user.
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ changed_when: false
+
+ tasks:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
+ name: ceph-handler
+
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
+ name: ceph-container-engine
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
+ name: ceph-container-common
+
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
+ name: ceph-nfs
+
- name: Switching from non-containerized to containerized ceph-crash
hosts:
- osds
- mdss
- rgws
+ - nfss
- rbdmirrors
- clients
- mgrs
'installer_phase_ceph_osd',
'installer_phase_ceph_mds',
'installer_phase_ceph_rgw',
+ 'installer_phase_ceph_nfs',
'installer_phase_ceph_rbdmirror',
'installer_phase_ceph_client',
'installer_phase_ceph_rgw_loadbalancer',
'title': 'Install Ceph RGW',
'playbook': 'roles/ceph-rgw/tasks/main.yml'
},
+ 'installer_phase_ceph_nfs': {
+ 'title': 'Install Ceph NFS',
+ 'playbook': 'roles/ceph-nfs/tasks/main.yml'
+ },
'installer_phase_ceph_rbdmirror': {
'title': 'Install Ceph RBD Mirror',
'playbook': 'roles/ceph-rbd-mirror/tasks/main.yml'
ansible.builtin.include_tasks: selinux.yml
when:
- ansible_facts['os_family'] == 'RedHat'
- - inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
register: result
until: result is succeeded
when:
- - inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
- ansible_facts['distribution_major_version'] == '8'
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
+ inventory_hostname in groups.get(nfs_group_name, []) or
inventory_hostname in groups.get(monitoring_group_name, [])
environment:
HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
osd_group_name: osds
rgw_group_name: rgws
mds_group_name: mdss
+nfs_group_name: nfss
rbdmirror_group_name: rbdmirrors
client_group_name: clients
mgr_group_name: mgrs
- "{{ osd_group_name }}"
- "{{ rgw_group_name }}"
- "{{ mds_group_name }}"
+ - "{{ nfs_group_name }}"
- "{{ rbdmirror_group_name }}"
- "{{ client_group_name }}"
- "{{ mgr_group_name }}"
ceph_osd_firewall_zone: public
ceph_rgw_firewall_zone: public
ceph_mds_firewall_zone: public
+ceph_nfs_firewall_zone: public
ceph_rbdmirror_firewall_zone: public
ceph_dashboard_firewall_zone: public
ceph_rgwloadbalancer_firewall_zone: public
ceph_stable_release: reef
ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
+nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+centos_release_nfs: centos-release-nfs-ganesha4
+nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu
+nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
+nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
+libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu
+
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack
ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
+nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman.
+# Flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous.
+nfs_ganesha_flavor: "ceph_main"
+
+
# REPOSITORY: CUSTOM
#
# Enabled when ceph_repository == 'custom'
handler_health_rgw_check_retries: 5
handler_health_rgw_check_delay: 10
+# NFS handler checks
+handler_health_nfs_check_retries: 5
+handler_health_nfs_check_delay: 10
+
# RBD MIRROR handler checks
handler_health_rbd_mirror_check_retries: 5
handler_health_rbd_mirror_check_delay: 10
ceph_rbd_mirror_pool: "rbd"
+###############
+# NFS-GANESHA #
+###############
+#
+# Access type options
+#
+# Enable NFS File access
+# If set to true, then ganesha is set up to export the root of the
+# Ceph filesystem, and ganesha's attribute and directory caching is disabled
+# as much as possible, since libcephfs clients also cache the same
+# information.
+#
+# Set this to true to enable File access via NFS. Requires an MDS role.
+nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
+
+
###################
# CONFIG OVERRIDE #
###################
- "Restart ceph osds"
- "Restart ceph mdss"
- "Restart ceph rgws"
+ - "Restart ceph nfss"
- "Restart ceph rbdmirrors"
- "Restart ceph mgrs"
register: tmpdirpath
when: rgw_group_name in group_names
listen: "Restart ceph rgws"
+ - name: Nfss handler
+ ansible.builtin.include_tasks: handler_nfss.yml
+ when: nfs_group_name in group_names
+ listen: "Restart ceph nfss"
+
- name: Rbdmirrors handler
ansible.builtin.include_tasks: handler_rbdmirrors.yml
when: rbdmirror_group_name in group_names
check_mode: false
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
+- name: Check for an nfs container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
+ register: ceph_nfs_container_stat
+ changed_when: false
+ failed_when: false
+ check_mode: false
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
- name: Check for a ceph-crash container
ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
register: ceph_crash_container_stat
- rbd_mirror_socket_stat.files | length > 0
- item.1.rc == 1
+- name: Check for an nfs ganesha pid
+ ansible.builtin.command: "pgrep ganesha.nfsd"
+ register: nfs_process
+ changed_when: false
+ failed_when: false
+ check_mode: false
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
- name: Check for a ceph-crash process
ansible.builtin.command: pgrep ceph-crash
changed_when: false
--- /dev/null
+---
+- name: Set _nfs_handler_called before restart
+ ansible.builtin.set_fact:
+ _nfs_handler_called: true
+
+- name: Copy nfs restart script
+ ansible.builtin.template:
+ src: restart_nfs_daemon.sh.j2
+ dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh"
+ owner: root
+ group: root
+ mode: "0750"
+ when: tmpdirpath.path is defined
+
+- name: Restart ceph nfs daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh
+ when:
+ - hostvars[item]['handler_nfs_status'] | default(False) | bool
+ - hostvars[item]['_nfs_handler_called'] | default(False) | bool
+ - hostvars[item].tmpdirpath.path is defined
+ with_items: "{{ groups[nfs_group_name] }}"
+ delegate_to: "{{ item }}"
+ changed_when: false
+ run_once: true
+
+- name: Set _nfs_handler_called after restart
+ ansible.builtin.set_fact:
+ _nfs_handler_called: false
handler_rgw_status: "{{ 0 in (rgw_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rgw_container_stat.get('rc') == 0 and ceph_rgw_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(rgw_group_name, [])
+- name: Set_fact handler_nfs_status
+ ansible.builtin.set_fact:
+ handler_nfs_status: "{{ (nfs_process.get('rc') == 0) if not containerized_deployment | bool else (ceph_nfs_container_stat.get('rc') == 0 and ceph_nfs_container_stat.get('stdout_lines', []) | length != 0) }}"
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
- name: Set_fact handler_rbd_status
ansible.builtin.set_fact:
handler_rbd_mirror_status: "{{ 0 in (rbd_mirror_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rbd_mirror_container_stat.get('rc') == 0 and ceph_rbd_mirror_container_stat.get('stdout_lines', []) | length != 0) }}"
--- /dev/null
+#!/bin/bash
+
+RETRIES="{{ handler_health_nfs_check_retries }}"
+DELAY="{{ handler_health_nfs_check_delay }}"
+NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
+PID=/var/run/ganesha/ganesha.pid
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
+{% endif %}
+
+# First, restart the daemon
+{% if containerized_deployment | bool -%}
+systemctl restart $NFS_NAME
+# Wait and ensure the pid exists after restarting the daemon
+while [ $RETRIES -ne 0 ]; do
+ $DOCKER_EXEC test -f $PID && exit 0
+ sleep $DELAY
+ let RETRIES=RETRIES-1
+done
+# If we reach this point, it means the pid is not present.
+echo "PID file ${PID} could not be found, which means Ganesha is not running. Showing $NFS_NAME unit logs now:"
+journalctl -u $NFS_NAME
+exit 1
+{% else %}
+systemctl restart nfs-ganesha
+{% endif %}
- mds_group_name is defined
- mds_group_name in group_names
+ - name: Open ceph networks on nfs
+ ansible.posix.firewalld:
+ zone: "{{ ceph_nfs_firewall_zone }}"
+ source: "{{ item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items: "{{ public_network.split(',') }}"
+ when:
+ - nfs_group_name is defined
+ - nfs_group_name in group_names
+
+ - name: Open nfs ports
+ ansible.posix.firewalld:
+ service: nfs
+ zone: "{{ ceph_nfs_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - nfs_group_name is defined
+ - nfs_group_name in group_names
+
+ - name: Open nfs ports (portmapper)
+ ansible.posix.firewalld:
+ port: "111/tcp"
+ zone: "{{ ceph_nfs_firewall_zone }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when:
+ - nfs_group_name is defined
+ - nfs_group_name in group_names
+
- name: Open ceph networks on rbdmirror
ansible.posix.firewalld:
zone: "{{ ceph_rbdmirror_firewall_zone }}"
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2016] [Red Hat, Inc.]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+# Ansible role: ceph-nfs
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though NFS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on NFS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory.
+copy_admin_key: false
+
+# Whether the docker container or systemd service should be enabled
+# and started. It's useful to set this to false if the nfs-ganesha
+# service is managed by pacemaker.
+ceph_nfs_enable_service: true
+
+# The ceph-nfs systemd service uses ansible's hostname as an instance id,
+# so the service name is ceph-nfs@{{ ansible_facts['hostname'] }}. This is not
+# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
+# such a case it's better to have a constant instance id instead, which
+# can be set via 'ceph_nfs_service_suffix'.
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
+
+######################
+# NFS Ganesha Config #
+######################
+ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
+ceph_nfs_dynamic_exports: false
+# If set to true, then rados is used to store ganesha exports
+# and client session information. This is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover.
+ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLs.
+ceph_nfs_rados_export_index: "ganesha-export-index"
+# Address the ganesha service should listen on; by default ganesha listens on all
+# addresses. (Note: ganesha ignores this parameter in the current version due to
+# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
+# ceph_nfs_bind_addr: 0.0.0.0
+
+# If set to true, then ganesha's attribute and directory caching is disabled
+# as much as possible. Currently, ganesha caches by default.
+# When using ganesha as CephFS's gateway, it is recommended to turn off
+# ganesha's caching as the libcephfs clients also cache the same information.
+# Note: Irrespective of this option's setting, ganesha's caching is disabled
+# when the 'nfs_file_gw' option is set to true.
+ceph_nfs_disable_caching: false
+
+# This is the file ganesha will use to control NFSv4 ID mapping
+ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf"
+
+# idmap configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# Example:
+# idmap_conf_overrides:
+# General:
+# Domain: foo.domain.net
+idmap_conf_overrides: {}
+
+####################
+# FSAL Ceph Config #
+####################
+ceph_nfs_ceph_export_id: 20133
+ceph_nfs_ceph_pseudo_path: "/cephfile"
+ceph_nfs_ceph_protocols: "3,4"
+ceph_nfs_ceph_access_type: "RW"
+ceph_nfs_ceph_user: "admin"
+ceph_nfs_ceph_squash: "Root_Squash"
+ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p"
+
+###################
+# FSAL RGW Config #
+###################
+ceph_nfs_rgw_export_id: 20134
+ceph_nfs_rgw_pseudo_path: "/cephobject"
+ceph_nfs_rgw_protocols: "3,4"
+ceph_nfs_rgw_access_type: "RW"
+ceph_nfs_rgw_user: "cephnfs"
+ceph_nfs_rgw_squash: "Root_Squash"
+ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
+# Note: keys are optional and can be generated, but not on containerized
+# deployments, where they must be configured.
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and
+# must be in the correct ganesha.conf format seen here:
+# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
+#
+# Example:
+# CACHEINODE {
+# # Entries_HWMark = 100000;
+# }
+#
+# ganesha_core_param_overrides:
+# ganesha_ceph_export_overrides:
+# ganesha_rgw_export_overrides:
+# ganesha_rgw_section_overrides:
+# ganesha_log_overrides:
+# ganesha_conf_overrides: |
+# CACHEINODE {
+# # Entries_HWMark = 100000;
+# }
+
+##########
+# DOCKER #
+##########
+
+ceph_docker_image: "ceph/daemon"
+ceph_docker_image_tag: latest
+ceph_nfs_docker_extra_env:
+ceph_config_keys: [] # DON'T TOUCH ME
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Daniel Gryniewicz
+ description: Installs Ceph NFS Gateway
+ license: Apache
+ min_ansible_version: '2.7'
+ platforms:
+ - name: EL
+ versions:
+ - 'all'
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: Create rgw nfs user "{{ ceph_nfs_rgw_user }}"
+ radosgw_user:
+ name: "{{ ceph_nfs_rgw_user }}"
+ cluster: "{{ cluster }}"
+ display_name: "RGW NFS User"
+ access_key: "{{ ceph_nfs_rgw_access_key | default(omit) }}"
+ secret_key: "{{ ceph_nfs_rgw_secret_key | default(omit) }}"
+ run_once: true
+ register: rgw_nfs_user
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: nfs_obj_gw | bool
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: Set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key
+ ansible.builtin.set_fact:
+ ceph_nfs_rgw_access_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['access_key'] }}"
+ ceph_nfs_rgw_secret_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['secret_key'] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: nfs_obj_gw | bool
--- /dev/null
+---
+# global/common requirement
+- name: Stop nfs server service
+ ansible.builtin.systemd:
+ name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
+ state: stopped
+ enabled: false
+ failed_when: false
+
+- name: Include pre_requisite_non_container.yml
+ ansible.builtin.include_tasks: pre_requisite_non_container.yml
+ when: not containerized_deployment | bool
+
+- name: Include pre_requisite_container.yml
+ ansible.builtin.include_tasks: pre_requisite_container.yml
+ when: containerized_deployment | bool
+
+- name: Set_fact _rgw_hostname
+ ansible.builtin.set_fact:
+ _rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}"
+
+- name: Set rgw parameter (log file)
+ ceph_config:
+ action: set
+ who: "client.rgw.{{ _rgw_hostname }}"
+ option: "log file"
+ value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}.log"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ loop: "{{ groups.get('nfss', []) }}"
+
+- name: Include create_rgw_nfs_user.yml
+ ansible.builtin.import_tasks: create_rgw_nfs_user.yml
+ when: groups.get(mon_group_name, []) | length > 0
+
+- name: Install nfs-ganesha-selinux on RHEL 8
+ ansible.builtin.package:
+ name: nfs-ganesha-selinux
+ state: present
+ register: result
+ until: result is succeeded
+ when:
+ - not containerized_deployment | bool
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ - ansible_facts['os_family'] == 'RedHat'
+ - ansible_facts['distribution_major_version'] == '8'
+
+# NOTE (leseb): workaround for issues with ganesha and librgw
+- name: Add ganesha_t to permissive domain
+ community.general.selinux_permissive:
+ name: ganesha_t
+ permissive: true
+ failed_when: false
+ when:
+ - not containerized_deployment | bool
+ - ansible_facts['os_family'] == 'RedHat'
+ - ansible_facts['selinux']['status'] == 'enabled'
+
+- name: Nfs with external ceph cluster task related
+ when:
+ - groups.get(mon_group_name, []) | length == 0
+ - ceph_nfs_ceph_user is defined
+ block:
+ - name: Create keyring directory
+ ansible.builtin.file:
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ item }}"
+ state: directory
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0755"
+ with_items:
+ - "{{ ceph_nfs_ceph_user }}"
+ - "{{ ansible_facts['hostname'] }}"
+
+ - name: Set_fact rgw_client_name
+ ansible.builtin.set_fact:
+ rgw_client_name: "client.rgw.{{ ceph_nfs_ceph_user }}"
+
+ - name: Get client cephx keys
+ ansible.builtin.copy:
+ dest: "{{ item.1 }}"
+ content: "{{ item.0.content | b64decode }}"
+ mode: "{{ item.0.item.get('mode', '0600') }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ with_nested:
+ - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}"
+ - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"]
+ when:
+ - not item.0.get('skipped', False)
+ - item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: Include start_nfs.yml
+ ansible.builtin.import_tasks: start_nfs.yml
--- /dev/null
+---
+- name: Keyring related tasks
+ when: groups.get(mon_group_name, []) | length > 0
+ block:
+ - name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ with_items: "{{ groups.get(mon_group_name, []) }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ run_once: true
+
+ - name: Create directories
+ ansible.builtin.file:
+ path: "{{ item.0 }}"
+ state: "directory"
+ owner: "{{ ceph_uid }}"
+ group: "{{ ceph_uid }}"
+ mode: "0755"
+ delegate_to: "{{ item.1 }}"
+ with_nested:
+ - ["/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}",
+ "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}"]
+ - ["{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}"]
+
+ - name: Set_fact keyrings_list
+ ansible.builtin.set_fact:
+ keyrings_list:
+ - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
+ - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+ - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} }
+          - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rw pool=.nfs"} }
+
+ - name: Create keyrings from a monitor
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ dest: "{{ item.path }}"
+ caps: "{{ item.caps }}"
+ import_key: true
+ owner: "{{ ceph_uid }}"
+ group: "{{ ceph_uid }}"
+ mode: "0600"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ loop: "{{ keyrings_list }}"
+ when:
+ - cephx | bool
+ - item.create | default(False) | bool
+
+ - name: Get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ environment:
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ register: _rgw_keys
+ loop: "{{ keyrings_list }}"
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ when:
+ - cephx | bool
+ - item.copy_key | default(True) | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: Debug
+ ansible.builtin.debug:
+ msg: "{{ _rgw_keys }}"
+
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _rgw_keys.results }}"
+ when:
+ - cephx | bool
+ - item.item.copy_key | default(True) | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: Dbus related tasks
+ when: ceph_nfs_dynamic_exports | bool
+ block:
+ - name: Get file
+ ansible.builtin.command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf"
+ register: dbus_ganesha_file
+ run_once: true
+ changed_when: false
+
+ - name: Create dbus service file
+ ansible.builtin.copy:
+ content: "{{ dbus_ganesha_file.stdout }}"
+ dest: /etc/dbus-1/system.d/org.ganesha.nfsd.conf
+ owner: "root"
+ group: "root"
+ mode: "0644"
+
+ - name: Reload dbus configuration
+ ansible.builtin.command: "killall -SIGHUP dbus-daemon"
+ changed_when: false
--- /dev/null
+---
+- name: Include red hat based system related tasks
+ ansible.builtin.include_tasks: pre_requisite_non_container_red_hat.yml
+ when: ansible_facts['os_family'] == 'RedHat'
+
+- name: Include debian based system related tasks
+ ansible.builtin.include_tasks: pre_requisite_non_container_debian.yml
+ when: ansible_facts['os_family'] == 'Debian'
+
+- name: Install nfs rgw/cephfs gateway - SUSE/openSUSE
+ community.general.zypper:
+ name: "{{ item.name }}"
+ disable_gpg_check: true
+ with_items:
+ - { name: 'nfs-ganesha-rgw', install: "{{ nfs_obj_gw }}" }
+ - { name: 'radosgw', install: "{{ nfs_obj_gw }}" }
+ - { name: 'nfs-ganesha-ceph', install: "{{ nfs_file_gw }}" }
+ when:
+ - (ceph_origin == 'repository' or ceph_origin == 'distro')
+ - ansible_facts['os_family'] == 'Suse'
+ - item.install | bool
+ register: result
+ until: result is succeeded
+
+# NOTE (leseb): we use root:ceph for permissions since ganesha
+# does not have the right selinux context to read ceph directories.
+- name: Create rados gateway and ganesha directories
+ ansible.builtin.file:
+ path: "{{ item.name }}"
+ state: directory
+ owner: "{{ item.owner | default('ceph') }}"
+ group: "{{ item.group | default('ceph') }}"
+ mode: "{{ ceph_directories_mode }}"
+ with_items:
+ - { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" }
+ - { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" }
+ - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" }
+ - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
+ - { name: "/var/log/ceph", create: true }
+ - { name: "/var/log/ganesha", create: true, owner: root, group: root }
+ - { name: "/var/run/ceph", create: true }
+ when: item.create | bool
+
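+# The keys below are fetched from the first monitor and copied locally only when
+# their copy_key flag is true (bootstrap-rgw for object gateways, client.admin
+# when copy_admin_key is enabled).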
+- name: Cephx related tasks
+ when:
+ - cephx | bool
+ - groups.get(mon_group_name, []) | length > 0
+ block:
+ - name: Get keys from monitors
+ ceph_key:
+ name: "{{ item.name }}"
+ cluster: "{{ cluster }}"
+ output_format: plain
+ state: info
+ register: _rgw_keys
+ with_items:
+ - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
+ - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+ delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+ run_once: true
+ when:
+ - cephx | bool
+ - item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
+ dest: "{{ item.item.path }}"
+ content: "{{ item.stdout + '\n' }}"
+ owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ with_items: "{{ _rgw_keys.results }}"
+ when:
+ - cephx | bool
+ - item.item.copy_key | bool
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
+
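+  # For the RGW FSAL, a rados gateway keyring is generated from the
+  # bootstrap-rgw credentials and placed in the gateway's data directory.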
+ - name: Nfs object gateway related tasks
+ when: nfs_obj_gw | bool
+ block:
+ - name: Create rados gateway keyring
+ ceph_key:
+ name: "client.rgw.{{ ansible_facts['hostname'] }}"
+ cluster: "{{ cluster }}"
+ user: client.bootstrap-rgw
+ user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
+ caps:
+ mon: "allow rw"
+ osd: "allow rwx"
+ dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"
+ import_key: false
+ owner: ceph
+ group: ceph
+ mode: "{{ ceph_keyring_permissions }}"
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
--- /dev/null
+---
+- name: Debian based systems - repo handling
+ when: ceph_origin == 'repository'
+ block:
+ - name: Stable repos specific tasks
+ when:
+ - nfs_ganesha_stable | bool
+ - ceph_repository == 'community'
+ block:
+ - name: Add nfs-ganesha stable repository
+ ansible.builtin.apt_repository:
+ repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
+ state: present
+ update_cache: false
+ register: add_ganesha_apt_repo
+
+ - name: Add libntirpc stable repository
+ ansible.builtin.apt_repository:
+ repo: "deb {{ libntirpc_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
+ state: present
+ update_cache: false
+ register: add_libntirpc_apt_repo
+ when: libntirpc_stable_deb_repo is defined
+
+ - name: Add nfs-ganesha ppa apt key
+ ansible.builtin.apt_key:
+ keyserver: "{{ nfs_ganesha_apt_keyserver }}"
+ id: "{{ nfs_ganesha_apt_key_id }}"
+ when:
+ - nfs_ganesha_apt_key_id is defined
+ - nfs_ganesha_apt_keyserver is defined
+
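+    # The repositories above are added with update_cache: false; a single,
+    # retried cache refresh runs below only when one of them actually changed.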
+ - name: Update apt cache
+ ansible.builtin.apt:
+ update_cache: true
+ register: update_ganesha_apt_cache
+ retries: 5
+ delay: 2
+ until: update_ganesha_apt_cache is success
+ when: add_ganesha_apt_repo is changed or add_libntirpc_apt_repo is changed
+
+ - name: Debian based systems - dev repos specific tasks
+ when:
+ - nfs_ganesha_dev | bool
+ - ceph_repository == 'dev'
+ block:
+ - name: Fetch nfs-ganesha development repository
+ ansible.builtin.uri:
+ url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
+ return_content: true
+ register: nfs_ganesha_dev_apt_repo
+
+ - name: Add nfs-ganesha development repository
+ ansible.builtin.copy:
+ content: "{{ nfs_ganesha_dev_apt_repo.content }}"
+ dest: /etc/apt/sources.list.d/nfs-ganesha-dev.list
+ owner: root
+ group: root
+ backup: true
+ mode: "0644"
+
+- name: Debian based systems - install required packages
+ block:
+ - name: Debian based systems
+ when: ceph_origin == 'repository' or ceph_origin == 'distro'
+ block:
+ - name: Install nfs rgw/cephfs gateway - debian
+ ansible.builtin.apt:
+ name: ['nfs-ganesha-rgw', 'radosgw']
+ allow_unauthenticated: true
+ register: result
+ until: result is succeeded
+ when: nfs_obj_gw | bool
+ - name: Install nfs rgw/cephfs gateway - debian
+ ansible.builtin.apt:
+ name: nfs-ganesha-ceph
+ allow_unauthenticated: true
+ register: result
+ until: result is succeeded
+ when: nfs_file_gw | bool
--- /dev/null
+---
+- name: Red hat based systems - repo handling
+ when: ceph_origin == 'repository'
+ block:
+ - name: Red hat based systems - stable repo related tasks
+ when:
+ - nfs_ganesha_stable | bool
+ - ceph_repository == 'community'
+ block:
+ - name: Add nfs-ganesha stable repository
+ ansible.builtin.package:
+ name: "{{ centos_release_nfs }}"
+ state: present
+
+ - name: Red hat based systems - dev repo related tasks
+ when:
+ - nfs_ganesha_dev | bool
+ - ceph_repository == 'dev'
+ block:
+ - name: Add nfs-ganesha dev repo
+ ansible.builtin.get_url:
+ url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
+ dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
+ mode: "0644"
+ force: true
+
+- name: Red hat based systems - install nfs packages
+ block:
+ - name: Install nfs cephfs gateway
+ ansible.builtin.package:
+ name: ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace']
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
+ register: result
+ until: result is succeeded
+ when: nfs_file_gw | bool
+
+ - name: Install redhat nfs-ganesha-rgw and ceph-radosgw packages
+ ansible.builtin.package:
+ name: ['nfs-ganesha-rgw', 'nfs-ganesha-rados-grace', 'nfs-ganesha-rados-urls', 'ceph-radosgw']
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
+ register: result
+ until: result is succeeded
+ when: nfs_obj_gw | bool
--- /dev/null
+---
+- name: Nfs various pre-requisite tasks
+ block:
+ - name: Set_fact exec_cmd_nfs - external
+ ansible.builtin.set_fact:
+ exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring"
+ delegate_node: "{{ inventory_hostname }}"
+ when: groups.get(mon_group_name, []) | length == 0
+
+ - name: Set_fact exec_cmd_nfs - internal
+ ansible.builtin.set_fact:
+ exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
+ delegate_node: "{{ groups[mon_group_name][0] }}"
+ when: groups.get(mon_group_name, []) | length > 0
+
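+  # With the rados backend, ganesha reads its export list from a rados object
+  # (ceph_nfs_rados_export_index); an empty index object is created on first
+  # deployment so the %url include in ganesha.conf resolves.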
+ - name: Check if rados index object exists
+ ansible.builtin.shell: "set -o pipefail && {{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls | grep {{ ceph_nfs_rados_export_index }}"
+ changed_when: false
+ failed_when: false
+ register: rados_index_exists
+ check_mode: false
+ when: ceph_nfs_rados_backend | bool
+ delegate_to: "{{ delegate_node }}"
+ run_once: true
+
+ - name: Create an empty rados index object
+ ansible.builtin.command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
+ when:
+ - ceph_nfs_rados_backend | bool
+ - rados_index_exists.rc != 0
+ delegate_to: "{{ delegate_node }}"
+ changed_when: false
+ run_once: true
+
+- name: Create /etc/ganesha
+ ansible.builtin.file:
+ path: /etc/ganesha
+ state: directory
+ owner: root
+ group: root
+ mode: "0755"
+
+- name: Generate ganesha configuration file
+ ansible.builtin.template:
+ src: "ganesha.conf.j2"
+ dest: /etc/ganesha/ganesha.conf
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: Restart ceph nfss
+
+- name: Generate ganesha idmap.conf file
+ openstack.config_template.config_template:
+ src: "idmap.conf.j2"
+ dest: "{{ ceph_nfs_idmap_conf }}"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ config_overrides: "{{ idmap_conf_overrides }}"
+ config_type: ini
+ notify: Restart ceph nfss
+
+- name: Create exports directory
+ ansible.builtin.file:
+ path: /etc/ganesha/export.d
+ state: directory
+ owner: "root"
+ group: "root"
+ mode: "0755"
+ when: ceph_nfs_dynamic_exports | bool
+
+- name: Create exports dir index file
+ ansible.builtin.copy:
+ content: ""
+ force: false
+ dest: /etc/ganesha/export.d/INDEX.conf
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ when: ceph_nfs_dynamic_exports | bool
+
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
+ when: containerized_deployment | bool
+
+- name: Systemd start nfs container
+ ansible.builtin.systemd:
+ name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
+ state: started
+ enabled: true
+ masked: false
+ daemon_reload: true
+ when:
+ - containerized_deployment | bool
+ - ceph_nfs_enable_service | bool
+
+- name: Start nfs gateway service
+ ansible.builtin.systemd:
+ name: nfs-ganesha
+ state: started
+ enabled: true
+ masked: false
+ when:
+ - not containerized_deployment | bool
+ - ceph_nfs_enable_service | bool
--- /dev/null
+---
+- name: Generate systemd unit file
+ ansible.builtin.template:
+ src: "{{ role_path }}/templates/ceph-nfs.service.j2"
+ dest: /etc/systemd/system/ceph-nfs@.service
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ notify: Restart ceph nfss
--- /dev/null
+[Unit]
+Description=NFS-Ganesha file server
+Documentation=http://github.com/nfs-ganesha/nfs-ganesha/wiki
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph /var/log/ganesha
+{% endif %}
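+# Under podman the container is started detached and tracked through the conmon
+# pid/cid files, matching the Type=forking and PIDFile settings at the bottom of
+# this unit; under docker it stays attached in the foreground.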
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
+ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+-v /etc/ceph:/etc/ceph:z \
+-v /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:z \
+-v /var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring:/etc/ceph/keyring:z \
+-v /etc/ganesha:/etc/ganesha:z \
+-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
+-v /var/run/ceph:/var/run/ceph:z \
+-v /var/log/ceph:/var/log/ceph:z \
+-v /var/log/ganesha:/var/log/ganesha:z \
+-v /etc/localtime:/etc/localtime:ro \
+{{ ceph_nfs_docker_extra_env }} \
+--entrypoint=/usr/bin/ganesha.nfsd \
+--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
+{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+-F -L STDOUT
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+#jinja2: trim_blocks: "true", lstrip_blocks: "true"
+# {{ ansible_managed }}
+
+{% if ceph_nfs_dynamic_exports | bool and not ceph_nfs_rados_backend | bool %}
+%include /etc/ganesha/export.d/INDEX.conf
+{% endif %}
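+{# With the rados backend enabled, dynamic exports are served from the rados
+   index object referenced by the %url directive below rather than from this
+   local INDEX.conf. #}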
+
+NFS_Core_Param
+{
+{% if ceph_nfs_bind_addr is defined %}
+ Bind_Addr={{ ceph_nfs_bind_addr }};
+{% endif %}
+{{ ganesha_core_param_overrides | default(None) }}
+}
+
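+{# When caching is disabled or the CephFS FSAL is in use, ganesha's attribute
+   and directory caches are reduced to their minimum so that caching is left to
+   the Ceph client libraries. #}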
+{% if ceph_nfs_disable_caching | bool or nfs_file_gw | bool %}
+EXPORT_DEFAULTS {
+ Attr_Expiration_Time = 0;
+}
+
+CACHEINODE {
+ Dir_Chunk = 0;
+
+ NParts = 1;
+ Cache_Size = 1;
+}
+{% endif %}
+
+{% if ceph_nfs_rados_backend | bool %}
+RADOS_URLS {
+ ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+ userid = "{{ ceph_nfs_ceph_user }}";
+}
+%url rados://{{ cephfs_data_pool.name }}/{{ ceph_nfs_rados_export_index }}
+
+NFSv4 {
+ RecoveryBackend = 'rados_kv';
+ IdmapConf = "{{ ceph_nfs_idmap_conf }}";
+}
+RADOS_KV {
+ ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+ userid = "{{ ceph_nfs_ceph_user }}";
+ pool = "{{ cephfs_data_pool.name }}";
+}
+{% endif %}
+
+{% if nfs_file_gw | bool %}
+EXPORT
+{
+ Export_id={{ ceph_nfs_ceph_export_id }};
+
+ Path = "/";
+
+ Pseudo = {{ ceph_nfs_ceph_pseudo_path }};
+
+ Access_Type = {{ ceph_nfs_ceph_access_type }};
+
+ Protocols = {{ ceph_nfs_ceph_protocols }};
+
+ Transports = TCP;
+
+ SecType = {{ ceph_nfs_ceph_sectype }};
+
+ Squash = {{ ceph_nfs_ceph_squash }};
+
+ Attr_Expiration_Time = 0;
+
+ FSAL {
+ Name = CEPH;
+ User_Id = "{{ ceph_nfs_ceph_user }}";
+ }
+
+ {{ ganesha_ceph_export_overrides | default(None) }}
+}
+{% endif %}
+{% if nfs_obj_gw | bool %}
+EXPORT
+{
+ Export_id={{ ceph_nfs_rgw_export_id }};
+
+ Path = "/";
+
+ Pseudo = {{ ceph_nfs_rgw_pseudo_path }};
+
+ Access_Type = {{ ceph_nfs_rgw_access_type }};
+
+ Protocols = {{ ceph_nfs_rgw_protocols }};
+
+ Transports = TCP;
+
+ SecType = {{ ceph_nfs_rgw_sectype }};
+
+ Squash = {{ ceph_nfs_rgw_squash }};
+
+ FSAL {
+ Name = RGW;
+ User_Id = "{{ ceph_nfs_rgw_user }}";
+ Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}";
+ Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}";
+ }
+
+ {{ ganesha_rgw_export_overrides | default(None) }}
+
+}
+
+RGW {
+ ceph_conf = "/etc/ceph/{{ cluster }}.conf";
+ cluster = "{{ cluster }}";
+ name = "{{ rgw_client_name }}";
+ {{ ganesha_rgw_section_overrides | default(None) }}
+}
+{% endif %}
+
+LOG {
+ Facility {
+ name = FILE;
+ destination = "{{ ceph_nfs_log_file }}";
+ enable = active;
+ }
+
+ {{ ganesha_log_overrides | default(None) }}
+}
+
+{{ ganesha_conf_overrides | default(None) }}
--- /dev/null
+[General]
+#Verbosity = 0
+# The following should be set to the local NFSv4 domain name
+# The default is the host's DNS domain name.
+#Domain = local.domain.edu
+
+# In multi-domain environments, some NFS servers will append the identity
+# management domain to the owner and owner_group in lieu of a true NFSv4
+# domain. This option can facilitate lookups in such environments. If
+# set to a value other than "none", the nsswitch plugin will first pass
+# the name to the password/group lookup function without stripping the
+# domain off. If that mapping fails then the plugin will try again using
+# the old method (comparing the domain in the string to the Domain value,
+# stripping it if it matches, and passing the resulting short name to the
+# lookup function). Valid values are "user", "group", "both", and
+# "none". The default is "none".
+#No-Strip = none
+
+# Winbind has a quirk whereby doing a group lookup in UPN format
+# (e.g. staff@americas.example.com) will cause the group to be
+# displayed prefixed with the full domain in uppercase
+# (e.g. AMERICAS.EXAMPLE.COM\staff) instead of in the familiar netbios
+# name format (e.g. AMERICAS\staff). Setting this option to true
+# causes the name to be reformatted before passing it to the group
+# lookup function in order to work around this. This setting is
+# ignored unless No-Strip is set to either "both" or "group".
+# The default is "false".
+#Reformat-Group = false
+
+# The following is a comma-separated list of Kerberos realm
+# names that should be considered to be equivalent to the
+# local realm, such that <user>@REALM.A can be assumed to
+# be the same user as <user>@REALM.B
+# If not specified, the default local realm is the domain name,
+# which defaults to the host's DNS domain name,
+# translated to upper-case.
+# Note that if this value is specified, the local realm name
+# must be included in the list!
+#Local-Realms =
+
+[Mapping]
+
+#Nobody-User = nobody
+#Nobody-Group = nobody
+
+[Translation]
+
+# Translation Method is an comma-separated, ordered list of
+# translation methods that can be used. Distributed methods
+# include "nsswitch", "umich_ldap", and "static". Each method
+# is a dynamically loadable plugin library.
+# New methods may be defined and inserted in the list.
+# The default is "nsswitch".
+#Method = nsswitch
+
+# Optional. This is a comma-separated, ordered list of
+# translation methods to be used for translating GSS
+# authenticated names to ids.
+# If this option is omitted, the same methods as those
+# specified in "Method" are used.
+#GSS-Methods = <alternate method list for translating GSS names>
+
+#-------------------------------------------------------------------#
+# The following are used only for the "static" Translation Method.
+#-------------------------------------------------------------------#
+[Static]
+
+# A "static" list of GSS-Authenticated names to
+# local user name mappings
+
+#someuser@REALM = localuser
+
+
+#-------------------------------------------------------------------#
+# The following are used only for the "umich_ldap" Translation Method.
+#-------------------------------------------------------------------#
+
+[UMICH_SCHEMA]
+
+# server information (REQUIRED)
+LDAP_server = ldap-server.local.domain.edu
+
+# the default search base (REQUIRED)
+LDAP_base = dc=local,dc=domain,dc=edu
+
+#-----------------------------------------------------------#
+# The remaining options have defaults (as shown)
+# and are therefore not required.
+#-----------------------------------------------------------#
+
+# whether or not to perform canonicalization on the
+# name given as LDAP_server
+#LDAP_canonicalize_name = true
+
+# absolute search base for (people) accounts
+#LDAP_people_base = <LDAP_base>
+
+# absolute search base for groups
+#LDAP_group_base = <LDAP_base>
+
+# Set to true to enable SSL - anything else is not enabled
+#LDAP_use_ssl = false
+
+# You must specify a CA certificate location if you enable SSL
+#LDAP_ca_cert = /etc/ldapca.cert
+
+# Objectclass mapping information
+
+# Mapping for the person (account) object class
+#NFSv4_person_objectclass = NFSv4RemotePerson
+
+# Mapping for the nfsv4name attribute the person object
+#NFSv4_name_attr = NFSv4Name
+
+# Mapping for the UID number
+#NFSv4_uid_attr = UIDNumber
+
+# Mapping for the GSSAPI Principal name
+#GSS_principal_attr = GSSAuthName
+
+# Mapping for the account name attribute (usually uid)
+# The value for this attribute must match the value of
+# the group member attribute - NFSv4_member_attr
+#NFSv4_acctname_attr = uid
+
+# Mapping for the group object class
+#NFSv4_group_objectclass = NFSv4RemoteGroup
+
+# Mapping for the GID attribute
+#NFSv4_gid_attr = GIDNumber
+
+# Mapping for the Group NFSv4 name
+#NFSv4_group_attr = NFSv4Name
+
+# Mapping for the Group member attribute (usually memberUID)
+# The value of this attribute must match the value of NFSv4_acctname_attr
+#NFSv4_member_attr = memberUID
\ No newline at end of file
--- /dev/null
+#!/bin/sh
+T=$1
+N=$2
+
+# start nfs-ganesha
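+# The run command below matches the ExecStart of ceph-nfs@.service; GANESHA_EPOCH,
+# when present in the environment, is passed through as an extra argument to
+# ganesha.nfsd.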
+/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+-v /var/lib/ceph:/var/lib/ceph:z \
+-v /etc/ceph:/etc/ceph:z \
+-v /var/lib/nfs/ganesha:/var/lib/nfs/ganesha:z \
+-v /etc/ganesha:/etc/ganesha:z \
+-v /var/run/ceph:/var/run/ceph:z \
+-v /var/log/ceph:/var/log/ceph:z \
+-v /var/log/ganesha:/var/log/ganesha:z \
+{% if ceph_nfs_dynamic_exports | bool %}
+--privileged \
+-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
+{% endif -%}
+-v /etc/localtime:/etc/localtime:ro \
+{{ ceph_nfs_docker_extra_env }} \
+--entrypoint=/usr/bin/ganesha.nfsd \
+--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
+{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+-F -L STDOUT "${GANESHA_EPOCH}"
--- /dev/null
+---
+- name: Fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone)
+ ansible.builtin.fail:
+ msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True"
+ when:
+ - nfs_obj_gw | bool
+ - groups.get(mon_group_name, []) | length == 0
+ - (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined)
+
+- name: Fail on openSUSE Leap 15.x using distro packages
+ ansible.builtin.fail:
+ msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
+ when:
+ - ceph_origin == 'distro'
+ - ansible_facts['distribution'] == 'openSUSE Leap'
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_create_pools is defined
+- name: Include check_nfs.yml
+ ansible.builtin.include_tasks: check_nfs.yml
+ when: inventory_hostname in groups.get(nfs_group_name, [])
+
- name: Include check_rbdmirror.yml
ansible.builtin.include_tasks: check_rbdmirror.yml
when:
- osds
- mdss
- rgws
+ - nfss
- rbdmirrors
- clients
- mgrs
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+- hosts: nfss
+ become: True
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph nfs install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-nfs
+
+ # post-tasks for following imports -
+ - name: set ceph nfs install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
- hosts: rbdmirrors
become: True
gather_facts: false
- osds
- mdss
- rgws
+ - nfss
- rbdmirrors
- clients
- mgrs
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+- hosts: nfss
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ pre_tasks:
+ - name: set ceph nfs install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-config
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-nfs
+
+ post_tasks:
+ - name: set ceph nfs install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
- hosts: rbdmirrors
gather_facts: false
become: True
if request.node.get_closest_marker('rbdmirror_secondary') and not ceph_rbd_mirror_remote_user: # noqa E501
pytest.skip('Not a valid test for a non-secondary rbd-mirror node')
- if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['clients'], ['monitoring']]:
- pytest.skip('Not a valid test for client nodes')
+ if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]:
+        pytest.skip('Not a valid test for nfs, client or monitoring nodes')
- if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['clients'], ['monitoring']]:
- pytest.skip('Not a valid test for client nodes')
+ if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]:
+        pytest.skip('Not a valid test for nfs, client or monitoring nodes')
if request.node.get_closest_marker("no_docker") and docker:
pytest.skip(
item.add_marker(pytest.mark.rbdmirrors)
elif "rgw" in test_path:
item.add_marker(pytest.mark.rgws)
+ elif "nfs" in test_path:
+ item.add_marker(pytest.mark.nfss)
elif "grafana" in test_path:
item.add_marker(pytest.mark.grafanas)
else:
osd_vms: 1
mds_vms: 1
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 1
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 2
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 2
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 3
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 3
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
[rgws]
rgw0
+#[nfss]
+#nfs0
+
[clients]
client0
client1
osd_vms: 3
mds_vms: 3
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
--- /dev/null
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+ CACHEINODE {
+ Entries_HWMark = 100000;
+ }
+nfs_ganesha_stable: false
+nfs_ganesha_dev: true
+nfs_ganesha_flavor: "ceph_main"
client0
client1
+#[nfss]
+#nfs0
+
[rbdmirrors]
rbd-mirror0
osd_vms: 3
mds_vms: 3
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
[rgws]
rgw0
+#[nfss]
+#nfs0
+
[clients]
client0
client1
osd_vms: 3
mds_vms: 3
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
--- /dev/null
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+ CACHEINODE {
+ Entries_HWMark = 100000;
+ }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_main"
client0
client1
+#[nfss]
+#nfs0
+
[rbdmirrors]
rbd-mirror0
osd_vms: 3
mds_vms: 3
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
[rgws]
rgw0
+[nfss]
+nfs0
+
[rbdmirrors]
rbd-mirror0
osd_vms: 2
mds_vms: 1
rgw_vms: 1
+nfs_vms: 1
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
rgw0
mds0
+#[nfss]
+#rgw0
+#mds0
+
[monitoring]
-mon0
+mon0
\ No newline at end of file
osd_vms: 2
mds_vms: 1
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
rgw0
mds0
+#[nfss]
+#rgw0
+#mds0
+
[monitoring]
-mon0
+mon0
\ No newline at end of file
osd_vms: 2
mds_vms: 1
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
regexp: "ceph_repository:.*"
replace: "ceph_repository: dev"
dest: "{{ group_vars_path }}/all"
+
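+  # The all_daemons dev scenario must consume the shaman dev repositories for
+  # ganesha, so nfs_ganesha_stable is forced off and nfs_ganesha_dev on in the
+  # nfss group_vars.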
+ - block:
+ - name: ensure nfs_ganesha_stable is set to False
+ replace:
+ regexp: "nfs_ganesha_stable:.*"
+ replace: "nfs_ganesha_stable: false"
+ dest: "{{ group_vars_path }}/nfss"
+
+ - name: ensure nfs_ganesha_dev is set to True
+ replace:
+ regexp: "nfs_ganesha_dev:.*"
+ replace: "nfs_ganesha_dev: true"
+ dest: "{{ group_vars_path }}/nfss"
+ when: "'all_daemons' in group_vars_path.split('/')"
when: change_dir is defined
- name: print contents of {{ group_vars_path }}/all
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 0
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 2
osd_vms: 0
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 2
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 4
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 4
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
[rgws]
rgw0
-clients]
+#[nfss]
+#nfs0
+
+[clients]
client0
client1
mon0
#[all:vars]
-#ansible_python_interpreter=/usr/bin/python3
+#ansible_python_interpreter=/usr/bin/python3
\ No newline at end of file
osd_vms: 2
mds_vms: 1
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 2
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 1
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 1
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 2
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 2
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 1
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 1
mds_vms: 0
rgw_vms: 1
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
osd_vms: 3
mds_vms: 0
rgw_vms: 2
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
--- /dev/null
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+ CACHEINODE {
+ Entries_HWMark = 100000;
+ }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_main"
osd_vms: 3
mds_vms: 0
rgw_vms: 2
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0
--- /dev/null
+import json
+import pytest
+
+
+class TestNFSs(object):
+
+ @pytest.mark.no_docker
+ @pytest.mark.parametrize('pkg', [
+ 'nfs-ganesha',
+ 'nfs-ganesha-rgw'
+ ])
+ def test_nfs_ganesha_package_is_installed(self, node, host, pkg):
+ assert host.package(pkg).is_installed
+
+ @pytest.mark.no_docker
+ def test_nfs_service_enabled_and_running(self, node, host):
+ s = host.service("nfs-ganesha")
+ assert s.is_enabled
+ assert s.is_running
+
+ @pytest.mark.no_docker
+ def test_nfs_config_override(self, node, host):
+ assert host.file(
+ "/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
+
+ def test_nfs_is_up(self, node, setup, ceph_status):
+ hostname = node["vars"]["inventory_hostname"]
+ cluster = setup["cluster_name"]
+ name = f"client.rgw.{hostname}"
+ output = ceph_status(f'/var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring', name=name)
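+        # The servicemap lists one "rgw-nfs" entry per active gateway plus an
+        # aggregate "summary" key, which is dropped before collecting hostnames.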
+ keys = list(json.loads(
+ output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys())
+ keys.remove('summary')
+ daemons = json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"]
+ hostnames = []
+ for key in keys:
+            hostnames.append(daemons[key]['metadata']['hostname'])
+        assert hostname in hostnames
+
+
+# NOTE (guits): This check must be fixed. (Permission denied error)
+# @pytest.mark.no_docker
+# def test_nfs_rgw_fsal_export(self, node, host):
+# if(host.mount_point("/mnt").exists):
+# cmd = host.run("sudo umount /mnt")
+# assert cmd.rc == 0
+# cmd = host.run("sudo mount.nfs localhost:/ceph /mnt/")
+# assert cmd.rc == 0
+# assert host.mount_point("/mnt").exists
mdss: for mds nodes
mgrs: for mgr nodes
mons: for mon nodes
+ nfss: for nfs nodes
osds: for osd nodes
rbdmirrors: for rbdmirror nodes
rgws: for rgw nodes
osd_vms: 3
mds_vms: 0
rgw_vms: 0
+nfs_vms: 0
grafana_server_vms: 0
rbd_mirror_vms: 0
client_vms: 0