git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Revert "nfs-ganesha support removal"
authorGuillaume Abrioux <gabrioux@ibm.com>
Mon, 17 Jun 2024 14:35:10 +0000 (16:35 +0200)
committerGuillaume Abrioux <gabrioux@ibm.com>
Thu, 20 Jun 2024 12:22:40 +0000 (14:22 +0200)
This reverts commit 675667e1d60b7080dad7293f2954de23718c5042.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
(cherry picked from commit 59198f5bcdf2eca5cc99c25951d93513c508c01e)

117 files changed:
CONTRIBUTING.md
Vagrantfile
contrib/vagrant_variables.yml.atomic
contrib/vagrant_variables.yml.linode
contrib/vagrant_variables.yml.openstack
dashboard.yml
docs/source/testing/scenarios.rst
group_vars/all.yml.sample
group_vars/nfss.yml.sample [new file with mode: 0644]
infrastructure-playbooks/cephadm-adopt.yml
infrastructure-playbooks/cephadm.yml
infrastructure-playbooks/docker-to-podman.yml
infrastructure-playbooks/gather-ceph-logs.yml
infrastructure-playbooks/purge-cluster.yml
infrastructure-playbooks/purge-dashboard.yml
infrastructure-playbooks/rolling_update.yml
infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
infrastructure-playbooks/take-over-existing-cluster.yml
plugins/callback/installer_checkpoint.py
roles/ceph-common/tasks/main.yml
roles/ceph-common/tasks/selinux.yml
roles/ceph-container-common/tasks/fetch_image.yml
roles/ceph-defaults/defaults/main.yml
roles/ceph-handler/handlers/main.yml
roles/ceph-handler/tasks/check_running_containers.yml
roles/ceph-handler/tasks/check_socket_non_container.yml
roles/ceph-handler/tasks/handler_nfss.yml [new file with mode: 0644]
roles/ceph-handler/tasks/main.yml
roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 [new file with mode: 0644]
roles/ceph-infra/tasks/configure_firewall.yml
roles/ceph-nfs/LICENSE [new file with mode: 0644]
roles/ceph-nfs/README.md [new file with mode: 0644]
roles/ceph-nfs/defaults/main.yml [new file with mode: 0644]
roles/ceph-nfs/meta/main.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/create_rgw_nfs_user.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/main.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/pre_requisite_container.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/pre_requisite_non_container.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/start_nfs.yml [new file with mode: 0644]
roles/ceph-nfs/tasks/systemd.yml [new file with mode: 0644]
roles/ceph-nfs/templates/ceph-nfs.service.j2 [new file with mode: 0644]
roles/ceph-nfs/templates/ganesha.conf.j2 [new file with mode: 0644]
roles/ceph-nfs/templates/idmap.conf.j2 [new file with mode: 0644]
roles/ceph-nfs/templates/systemd-run.j2 [new file with mode: 0644]
roles/ceph-validate/tasks/check_nfs.yml [new file with mode: 0644]
roles/ceph-validate/tasks/main.yml
site-container.yml.sample
site.yml.sample
tests/conftest.py
tests/functional/add-mdss/container/vagrant_variables.yml
tests/functional/add-mdss/vagrant_variables.yml
tests/functional/add-mgrs/container/vagrant_variables.yml
tests/functional/add-mgrs/vagrant_variables.yml
tests/functional/add-mons/container/vagrant_variables.yml
tests/functional/add-mons/vagrant_variables.yml
tests/functional/add-osds/container/vagrant_variables.yml
tests/functional/add-osds/vagrant_variables.yml
tests/functional/add-rbdmirrors/container/vagrant_variables.yml
tests/functional/add-rbdmirrors/vagrant_variables.yml
tests/functional/add-rgws/container/vagrant_variables.yml
tests/functional/add-rgws/vagrant_variables.yml
tests/functional/all-in-one/container/vagrant_variables.yml
tests/functional/all-in-one/vagrant_variables.yml
tests/functional/all_daemons/container/hosts
tests/functional/all_daemons/container/vagrant_variables.yml
tests/functional/all_daemons/group_vars/nfss [new file with mode: 0644]
tests/functional/all_daemons/hosts
tests/functional/all_daemons/vagrant_variables.yml
tests/functional/all_daemons_ipv6/container/hosts
tests/functional/all_daemons_ipv6/container/vagrant_variables.yml
tests/functional/all_daemons_ipv6/group_vars/nfss [new file with mode: 0644]
tests/functional/all_daemons_ipv6/hosts
tests/functional/all_daemons_ipv6/vagrant_variables.yml
tests/functional/cephadm/hosts
tests/functional/cephadm/vagrant_variables.yml
tests/functional/collocation/container/hosts
tests/functional/collocation/container/vagrant_variables.yml
tests/functional/collocation/hosts
tests/functional/collocation/vagrant_variables.yml
tests/functional/dev_setup.yml
tests/functional/docker2podman/vagrant_variables.yml
tests/functional/external_clients/container/vagrant_variables.yml
tests/functional/external_clients/vagrant_variables.yml
tests/functional/infra_lv_create/vagrant_variables.yml
tests/functional/lvm-auto-discovery/container/vagrant_variables.yml
tests/functional/lvm-auto-discovery/vagrant_variables.yml
tests/functional/lvm-batch/container/vagrant_variables.yml
tests/functional/lvm-batch/vagrant_variables.yml
tests/functional/lvm-osds/container/vagrant_variables.yml
tests/functional/lvm-osds/vagrant_variables.yml
tests/functional/migrate_ceph_disk_to_ceph_volume/vagrant_variables.yml
tests/functional/podman/hosts
tests/functional/podman/vagrant_variables.yml
tests/functional/rbdmirror/container/secondary/vagrant_variables.yml
tests/functional/rbdmirror/container/vagrant_variables.yml
tests/functional/rbdmirror/secondary/vagrant_variables.yml
tests/functional/rbdmirror/vagrant_variables.yml
tests/functional/shrink_mds/container/vagrant_variables.yml
tests/functional/shrink_mds/vagrant_variables.yml
tests/functional/shrink_mgr/container/vagrant_variables.yml
tests/functional/shrink_mgr/vagrant_variables.yml
tests/functional/shrink_mon/container/vagrant_variables.yml
tests/functional/shrink_mon/vagrant_variables.yml
tests/functional/shrink_osd/container/vagrant_variables.yml
tests/functional/shrink_osd/vagrant_variables.yml
tests/functional/shrink_rbdmirror/container/vagrant_variables.yml
tests/functional/shrink_rbdmirror/vagrant_variables.yml
tests/functional/shrink_rgw/container/vagrant_variables.yml
tests/functional/shrink_rgw/vagrant_variables.yml
tests/functional/subset_update/container/vagrant_variables.yml
tests/functional/subset_update/group_vars/nfss [new file with mode: 0644]
tests/functional/subset_update/vagrant_variables.yml
tests/functional/tests/nfs/test_nfs_ganesha.py [new file with mode: 0644]
tests/pytest.ini
vagrant_variables.yml.sample

index a92c2f9e73937c77cd769849ef20720a128d1592..05a78c7f5930bdbcb31e8029901bb4c86045f3a1 100644 (file)
@@ -62,6 +62,7 @@ It means if you are pushing a patch modifying one of these files:
 - `./roles/ceph-rbd-mirror/defaults/main.yml`
 - `./roles/ceph-defaults/defaults/main.yml`
 - `./roles/ceph-osd/defaults/main.yml`
+- `./roles/ceph-nfs/defaults/main.yml`
 - `./roles/ceph-client/defaults/main.yml`
 - `./roles/ceph-common/defaults/main.yml`
 - `./roles/ceph-mon/defaults/main.yml`
index 1129d851391be2e28dd26d90b93183a784dd7c5b..04b465e832945c0a2159f09183c1f91c1778f9c5 100644 (file)
@@ -20,6 +20,7 @@ NMONS           = settings['mon_vms']
 NOSDS           = settings['osd_vms']
 NMDSS           = settings['mds_vms']
 NRGWS           = settings['rgw_vms']
+NNFSS           = settings['nfs_vms']
 NRBD_MIRRORS    = settings['rbd_mirror_vms']
 CLIENTS         = settings['client_vms']
 MGRS            = settings['mgr_vms']
@@ -61,6 +62,7 @@ ansible_provision = proc do |ansible|
     'osds'             => (0..NOSDS - 1).map { |j| "#{LABEL_PREFIX}osd#{j}" },
     'mdss'             => (0..NMDSS - 1).map { |j| "#{LABEL_PREFIX}mds#{j}" },
     'rgws'             => (0..NRGWS - 1).map { |j| "#{LABEL_PREFIX}rgw#{j}" },
+    'nfss'             => (0..NNFSS - 1).map { |j| "#{LABEL_PREFIX}nfs#{j}" },
     'rbd_mirrors'      => (0..NRBD_MIRRORS - 1).map { |j| "#{LABEL_PREFIX}rbd_mirror#{j}" },
     'clients'          => (0..CLIENTS - 1).map { |j| "#{LABEL_PREFIX}client#{j}" },
     'mgrs'             => (0..MGRS - 1).map { |j| "#{LABEL_PREFIX}mgr#{j}" },
@@ -371,6 +373,52 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     end
   end
 
+  (0..NNFSS - 1).each do |i|
+    config.vm.define "#{LABEL_PREFIX}nfs#{i}" do |nfs|
+      nfs.vm.hostname = "#{LABEL_PREFIX}nfs#{i}"
+      if ASSIGN_STATIC_IP && !IPV6
+          nfs.vm.network :private_network,
+         :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      end
+
+      # Virtualbox
+      nfs.vm.provider :virtualbox do |vb|
+        vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+      end
+
+      # VMware
+      nfs.vm.provider :vmware_fusion do |v|
+        v.vmx['memsize'] = "#{MEMORY}"
+      end
+
+      # Libvirt
+      nfs.vm.provider :libvirt do |lv,override|
+        lv.memory = MEMORY
+        lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
+      end
+
+      # Parallels
+      nfs.vm.provider "parallels" do |prl|
+        prl.name = "ceph-nfs#{i}"
+        prl.memory = "#{MEMORY}"
+      end
+
+      nfs.vm.provider :linode do |provider|
+        provider.label = nfs.vm.hostname
+      end
+    end
+  end
+
   (0..NMDSS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
       mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}"
index 20fa2b418d9f806d6c6a4405eb6515e498d8bf3d..3adff7a8829da19253cd73fd57c5e3f6a1813d28 100644 (file)
@@ -7,6 +7,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
 mgr_vms: 0
index 1352637f099d42671a9eab24367119588d895c2a..e62a3bffee0b54e83b4aa1aba9a016a585c875b7 100644 (file)
@@ -24,6 +24,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 1
 rgw_vms: 0
+nfs_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
 
index 73da1918ba00daeebf4fd9ce49a628e2ef12b1d1..420c09c98d39d167795085099ddb911b3d743596 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
 
index fd670601ce95e61f3516247e3b24cc191e51a208..e998e1e539c3d788a2020d0741497cac68453c66 100644 (file)
@@ -7,6 +7,7 @@
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ monitoring_group_name|default('monitoring') }}"
   gather_facts: false
   become: true
index 525f19cb1eb58f4713aefffd8cea9f6120f7c9da..c05d91899a2f6c0cb80890f6f5db97b3e4f4763c 100644 (file)
@@ -47,6 +47,7 @@ to follow (most of them are 1 line settings).
      osd_vms: 0
      mds_vms: 0
      rgw_vms: 0
+     nfs_vms: 0
      rbd_mirror_vms: 0
      client_vms: 0
      mgr_vms: 0
index 325846ebe07e2d8ec40c9b02b781bea43eb14c4d..aa293481f23683d3b7090f3be009d16ee6a189ec 100644 (file)
@@ -52,6 +52,7 @@ dummy:
 #osd_group_name: osds
 #rgw_group_name: rgws
 #mds_group_name: mdss
+#nfs_group_name: nfss
 #rbdmirror_group_name: rbdmirrors
 #client_group_name: clients
 #mgr_group_name: mgrs
@@ -62,6 +63,7 @@ dummy:
 #  - "{{ osd_group_name }}"
 #  - "{{ rgw_group_name }}"
 #  - "{{ mds_group_name }}"
+#  - "{{ nfs_group_name }}"
 #  - "{{ rbdmirror_group_name }}"
 #  - "{{ client_group_name }}"
 #  - "{{ mgr_group_name }}"
@@ -79,6 +81,7 @@ dummy:
 #ceph_osd_firewall_zone: public
 #ceph_rgw_firewall_zone: public
 #ceph_mds_firewall_zone: public
+#ceph_nfs_firewall_zone: public
 #ceph_rbdmirror_firewall_zone: public
 #ceph_dashboard_firewall_zone: public
 #ceph_rgwloadbalancer_firewall_zone: public
@@ -155,6 +158,13 @@ dummy:
 #ceph_stable_release: reef
 #ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
 
+#nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+#centos_release_nfs: centos-release-nfs-ganesha4
+#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu
+#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
+#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
+#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu
+
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
@@ -189,6 +199,13 @@ dummy:
 #ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack
 #ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
 
+#nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
+# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous
+#nfs_ganesha_flavor: "ceph_main"
+
+
 # REPOSITORY: CUSTOM
 #
 # Enabled when ceph_repository == 'custom'
@@ -404,6 +421,10 @@ dummy:
 #handler_health_rgw_check_retries: 5
 #handler_health_rgw_check_delay: 10
 
+# NFS handler checks
+#handler_health_nfs_check_retries: 5
+#handler_health_nfs_check_delay: 10
+
 # RBD MIRROR handler checks
 #handler_health_rbd_mirror_check_retries: 5
 #handler_health_rbd_mirror_check_delay: 10
@@ -425,6 +446,24 @@ dummy:
 
 #ceph_rbd_mirror_pool: "rbd"
 
+###############
+# NFS-GANESHA #
+###############
+#
+# Access type options
+#
+# Enable NFS File access
+# If set to true, then ganesha is set up to export the root of the
+# Ceph filesystem, and ganesha's attribute and directory caching is disabled
+# as much as possible since libcephfs clients also cache the same
+# information.
+#
+# Set this to true to enable File access via NFS.  Requires an MDS role.
+#nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
+
+
 ###################
 # CONFIG OVERRIDE #
 ###################
diff --git a/group_vars/nfss.yml.sample b/group_vars/nfss.yml.sample
new file mode 100644 (file)
index 0000000..1fc46ff
--- /dev/null
@@ -0,0 +1,131 @@
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though NFS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+# Whether docker container or systemd service should be enabled
+# and started, it's useful to set it to false if nfs-ganesha
+# service is managed by pacemaker
+#ceph_nfs_enable_service: true
+
+# ceph-nfs systemd service uses ansible's hostname as an instance id,
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
+# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
+# such case it's better to have constant instance id instead which
+# can be set by 'ceph_nfs_service_suffix'
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
+
+######################
+# NFS Ganesha Config #
+######################
+#ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
+#ceph_nfs_dynamic_exports: false
+# If set to true then rados is used to store ganesha exports
+# and client sessions information, this is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover
+#ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLS
+#ceph_nfs_rados_export_index: "ganesha-export-index"
+# Address ganesha service should listen on, by default ganesha listens on all
+# addresses. (Note: ganesha ignores this parameter in current version due to
+# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
+# ceph_nfs_bind_addr: 0.0.0.0
+
+# If set to true, then ganesha's attribute and directory caching is disabled
+# as much as possible. Currently, ganesha caches by default.
+# When using ganesha as CephFS's gateway, it is recommended to turn off
+# ganesha's caching as the libcephfs clients also cache the same information.
+# Note: Irrespective of this option's setting, ganesha's caching is disabled
+# when setting 'nfs_file_gw' option as true.
+#ceph_nfs_disable_caching: false
+
+# This is the file ganesha will use to control NFSv4 ID mapping
+#ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf"
+
+# idmap configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# Example:
+# idmap_conf_overrides:
+#   General:
+#     Domain: foo.domain.net
+#idmap_conf_overrides: {}
+
+####################
+# FSAL Ceph Config #
+####################
+#ceph_nfs_ceph_export_id: 20133
+#ceph_nfs_ceph_pseudo_path: "/cephfile"
+#ceph_nfs_ceph_protocols: "3,4"
+#ceph_nfs_ceph_access_type: "RW"
+#ceph_nfs_ceph_user: "admin"
+#ceph_nfs_ceph_squash: "Root_Squash"
+#ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p"
+
+###################
+# FSAL RGW Config #
+###################
+#ceph_nfs_rgw_export_id: 20134
+#ceph_nfs_rgw_pseudo_path: "/cephobject"
+#ceph_nfs_rgw_protocols: "3,4"
+#ceph_nfs_rgw_access_type: "RW"
+#ceph_nfs_rgw_user: "cephnfs"
+#ceph_nfs_rgw_squash: "Root_Squash"
+#ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
+# Note: keys are optional and can be generated, but not on containerized, where
+# they must be configured.
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and
+# must be in the correct ganesha.conf format seen here:
+# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
+#
+# Example:
+# CACHEINODE {
+#         # Entries_HWMark = 100000;
+# }
+#
+# ganesha_core_param_overrides:
+# ganesha_ceph_export_overrides:
+# ganesha_rgw_export_overrides:
+# ganesha_rgw_section_overrides:
+# ganesha_log_overrides:
+# ganesha_conf_overrides: |
+#     CACHEINODE {
+#             # Entries_HWMark = 100000;
+#     }
+
+##########
+# DOCKER #
+##########
+
+#ceph_docker_image: "ceph/daemon"
+#ceph_docker_image_tag: latest
+#ceph_nfs_docker_extra_env:
+#ceph_config_keys: [] # DON'T TOUCH ME
+
index 30f2b6f6bfcc47448ba4c9624c913104e77da957..7fedf18fcc06c7a6909c386cbbecb8d9bc6c3f00 100644 (file)
@@ -35,6 +35,7 @@
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ monitoring_group_name|default('monitoring') }}"
   become: true
   any_errors_fatal: true
           inventory_hostname in groups.get(mds_group_name, []) or
           inventory_hostname in groups.get(rgw_group_name, []) or
           inventory_hostname in groups.get(mgr_group_name, []) or
-          inventory_hostname in groups.get(rbdmirror_group_name, [])
+          inventory_hostname in groups.get(rbdmirror_group_name, []) or
+          inventory_hostname in groups.get(nfs_group_name, [])
 
     - name: Configure repository for installing cephadm
       when: containerized_deployment | bool
         path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_zone }}.{{ ansible_facts['hostname'] }}"
         state: absent
 
+- name: Stop and remove legacy ceph nfs daemons
+  hosts: "{{ nfs_group_name|default('nfss') }}"
+  tags: 'ceph_nfs_adopt'
+  serial: 1
+  become: true
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
+        name: ceph-defaults
+
+    - name: Import ceph-nfs role
+      ansible.builtin.import_role:
+        name: ceph-nfs
+        tasks_from: create_rgw_nfs_user.yml
+
+    - name: Enable ceph mgr nfs module
+      ceph_mgr_module:
+        name: "nfs"
+        cluster: "{{ cluster }}"
+        state: enable
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+
+    - name: Stop and disable ceph-nfs systemd service
+      ansible.builtin.service:
+        name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
+        state: stopped
+        enabled: false
+      failed_when: false
+
+    - name: Reset failed ceph-nfs systemd unit
+      ansible.builtin.command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}"  # noqa command-instead-of-module
+      changed_when: false
+      failed_when: false
+      when: containerized_deployment | bool
+
+    - name: Remove ceph-nfs systemd unit files
+      ansible.builtin.file:
+        path: "{{ item }}"
+        state: absent
+      loop:
+        - /etc/systemd/system/ceph-nfs@.service
+        - /etc/systemd/system/ceph-nfs@.service.d
+
+    - name: Remove legacy ceph radosgw directory
+      ansible.builtin.file:
+        path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
+        state: absent
+
+    - name: Create nfs ganesha cluster
+      ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}"
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+
+    - name: Create cephfs export
+      ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}"
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+      when: nfs_file_gw | bool
+
+    - name: Create rgw export
+      ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}"
+      changed_when: false
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      environment:
+        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
+      when: nfs_obj_gw | bool
+
 - name: Redeploy rbd-mirror daemons
   hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
   become: true
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ monitoring_group_name|default('monitoring') }}"
   become: true
   gather_facts: false
index b7f05209f076320a49423c74ddc1bc471449df50..b08e7f21d8fabfedc105608f1591eeb8ea29df4e 100644 (file)
@@ -7,6 +7,7 @@
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ monitoring_group_name|default('monitoring') }}"
   become: true
   gather_facts: false
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ monitoring_group_name|default('monitoring') }}"
   become: true
   gather_facts: false
index 5034553fcaa7c8164cf967773c1de66c3b573bd5..784a244a55b50a37b6baf9f639812028be76756e 100644 (file)
@@ -11,6 +11,7 @@
     - osds
     - mdss
     - rgws
+    - nfss
     - rbdmirrors
     - clients
     - mgrs
@@ -59,6 +60,7 @@
     - "{{ osd_group_name | default('osds') }}"
     - "{{ mds_group_name | default('mdss') }}"
     - "{{ rgw_group_name | default('rgws') }}"
+    - "{{ nfs_group_name | default('nfss') }}"
     - "{{ mgr_group_name | default('mgrs') }}"
     - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
     - "{{ monitoring_group_name | default('monitoring') }}"
                 inventory_hostname in groups.get(mds_group_name, []) or
                 inventory_hostname in groups.get(rgw_group_name, []) or
                 inventory_hostname in groups.get(mgr_group_name, []) or
-                inventory_hostname in groups.get(rbdmirror_group_name, [])
+                inventory_hostname in groups.get(rbdmirror_group_name, []) or
+                inventory_hostname in groups.get(nfs_group_name, [])
 
         - name: Pulling alertmanager/grafana/prometheus images from docker daemon
           ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
         tasks_from: systemd.yml
       when: inventory_hostname in groups.get(mgr_group_name, [])
 
+    - name: Import ceph-nfs role
+      ansible.builtin.import_role:
+        name: ceph-nfs
+        tasks_from: systemd.yml
+      when: inventory_hostname in groups.get(nfs_group_name, [])
+
     - name: Import ceph-osd role
       ansible.builtin.import_role:
         name: ceph-osd
index 759debfdad11b40491e9d2b94f0c2e62a840e1d4..ede64e5269e7c3c011e00caad4b69cc6decf7853 100644 (file)
@@ -5,6 +5,7 @@
     - osds
     - mdss
     - rgws
+    - nfss
     - rbdmirrors
     - clients
     - mgrs
index b55e468492d07533cdc7b1099d77a67e7b20d3ce..d1a4114f9af6d4c2efc70bbf863a21f4738dc9be 100644 (file)
@@ -37,6 +37,7 @@
     - mdss
     - rgws
     - rbdmirrors
+    - nfss
     - clients
     - mgrs
     - monitoring
       ansible.builtin.import_role:
         name: ceph-defaults
 
+    - name: Nfs related tasks
+      when: groups[nfs_group_name] | default([]) | length > 0
+      block:
+        - name: Get nfs nodes ansible facts
+          ansible.builtin.setup:
+            gather_subset:
+              - 'all'
+              - '!facter'
+              - '!ohai'
+          delegate_to: "{{ item }}"
+          delegate_facts: true
+          with_items: "{{ groups[nfs_group_name] }}"
+          run_once: true
+
+        - name: Get all nfs-ganesha mount points
+          ansible.builtin.command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+          register: nfs_ganesha_mount_points
+          failed_when: false
+          changed_when: false
+          with_items: "{{ groups[nfs_group_name] }}"
+
+        - name: Ensure nfs-ganesha mountpoint(s) are unmounted
+          ansible.posix.mount:
+            path: "{{ item.split(' ')[1] }}"
+            state: unmounted
+          with_items:
+            - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
+          when: item | length > 0
+
     - name: Ensure cephfs mountpoint(s) are unmounted
       ansible.builtin.command: umount -a -t ceph
       changed_when: false
         - ceph
         - libceph
 
+
+- name: Purge ceph nfs cluster
+  hosts: nfss
+  gather_facts: false # Already gathered previously
+  become: true
+  tasks:
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
+        name: ceph-defaults
+
+    - name: Stop ceph nfss with systemd
+      ansible.builtin.service:
+        name: "{{ 'ceph-nfs@' + ansible_facts['hostname'] if containerized_deployment | bool else 'nfs-ganesha' }}"
+        state: stopped
+      failed_when: false
+
+    - name: Remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
+      ansible.builtin.file:
+        path: "{{ item }}"
+        state: absent
+      with_items:
+        - /etc/ganesha
+        - /var/lib/nfs/ganesha
+        - /var/run/ganesha
+        - /etc/systemd/system/ceph-nfs@.service
+
+
 - name: Purge node-exporter
   hosts:
     - mons
     - mdss
     - rgws
     - rbdmirrors
+    - nfss
     - clients
     - mgrs
     - monitoring
     - mdss
     - rgws
     - rbdmirrors
+    - nfss
     - mgrs
   become: true
   tasks:
     - mdss
     - rgws
     - rbdmirrors
+    - nfss
     - clients
     - mgrs
     - monitoring
     - mdss
     - rgws
     - rbdmirrors
+    - nfss
     - mgrs
     - clients
   gather_facts: false # Already gathered previously
index 97e57b6155e1a1c0a914f505828aa15c5d86d2d0..7c9ae393bc5fe7f8b957250a3ac569afa2fe022d 100644 (file)
@@ -42,6 +42,7 @@
     - "{{ mds_group_name|default('mdss') }}"
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ client_group_name|default('clients') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ monitoring_group_name | default('monitoring') }}"
@@ -58,6 +59,7 @@
     - "{{ mds_group_name|default('mdss') }}"
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ client_group_name|default('clients') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ monitoring_group_name | default('monitoring') }}"
index 21bafa9100d983573e0164c87afcfadb94eb51b0..ff194d47b6c85634a5a46ca8ddf099a713cb780a 100644 (file)
@@ -46,6 +46,7 @@
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ client_group_name|default('clients') }}"
     - "{{ monitoring_group_name|default('monitoring') }}"
   tags: always
         name: ceph-rbd-mirror
 
 
+- name: Upgrade ceph nfs node
+  vars:
+    upgrade_ceph_packages: true
+  hosts: "{{ nfs_group_name|default('nfss') }}"
+  tags: nfss
+  serial: 1
+  become: true
+  gather_facts: false
+  tasks:
+    # failed_when: false is here so that if we upgrade
+    # from a version of ceph that does not have nfs-ganesha
+    # then this task will not fail
+    - name: Stop ceph nfs
+      ansible.builtin.systemd:
+        name: nfs-ganesha
+        state: stopped
+        enabled: false
+        masked: true
+      failed_when: false
+      when: not containerized_deployment | bool
+
+    - name: Systemd stop nfs container
+      ansible.builtin.systemd:
+        name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
+        state: stopped
+        enabled: false
+        masked: true
+      failed_when: false
+      when:
+        - ceph_nfs_enable_service | bool
+        - containerized_deployment | bool
+
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
+        name: ceph-defaults
+
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
+        name: ceph-facts
+
+    - name: Import ceph-handler role
+      ansible.builtin.import_role:
+        name: ceph-handler
+
+    - name: Import ceph-common role
+      ansible.builtin.import_role:
+        name: ceph-common
+      when: not containerized_deployment | bool
+
+    - name: Import ceph-container-common role
+      ansible.builtin.import_role:
+        name: ceph-container-common
+      when: containerized_deployment | bool
+
+    - name: Import ceph-config role
+      ansible.builtin.import_role:
+        name: ceph-config
+
+    - name: Import ceph-nfs role
+      ansible.builtin.import_role:
+        name: ceph-nfs
+
 - name: Upgrade ceph client node
   vars:
     upgrade_ceph_packages: true
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ mgr_group_name|default('mgrs') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
     - "{{ monitoring_group_name|default('monitoring') }}"
   tags: monitoring
   gather_facts: false
index 851618ac0838afd4c1127420c4986e2f56183312..5c9ce494d2cd2f3525ea825e26d624ae5cfca8e0 100644 (file)
@@ -43,6 +43,7 @@
     - "{{ mds_group_name|default('mdss') }}"
     - "{{ rgw_group_name|default('rgws') }}"
     - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+    - "{{ nfs_group_name|default('nfss') }}"
 
   become: true
 
         name: ceph-rbd-mirror
 
 
+- name: Switching from non-containerized to containerized ceph nfs
+
+  hosts: "{{ nfs_group_name|default('nfss') }}"
+
+  vars:
+    containerized_deployment: true
+    nfs_group_name: nfss
+
+  serial: 1
+  become: true
+  pre_tasks:
+
+    # failed_when: false is here because if we're
+    # working with a jewel cluster then ceph nfs
+    # will not exist
+    - name: Stop non-containerized ceph nfs(s)
+      ansible.builtin.service:
+        name: nfs-ganesha
+        state: stopped
+        enabled: false
+      failed_when: false
+
+    - name: Import ceph-defaults role
+      ansible.builtin.import_role:
+        name: ceph-defaults
+
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
+        name: ceph-facts
+
+    # NOTE: changed from file module to raw find command for performance reasons
+    # The file module has to run checks on current ownership of all directories and files. This is unnecessary
+    # as in this case we know we want all owned by ceph user
+    - name: Set proper ownership on ceph directories
+      ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+      changed_when: false
+
+  tasks:
+    - name: Import ceph-handler role
+      ansible.builtin.import_role:
+        name: ceph-handler
+
+    - name: Import ceph-container-engine role
+      ansible.builtin.import_role:
+        name: ceph-container-engine
+
+    - name: Import ceph-container-common role
+      ansible.builtin.import_role:
+        name: ceph-container-common
+
+    - name: Import ceph-nfs role
+      ansible.builtin.import_role:
+        name: ceph-nfs
+
 - name: Switching from non-containerized to containerized ceph-crash
 
   hosts:
index 7d61f7088d17d1ad6e78372548a23030278e9241..228e86ae362545c51d1c8a091bcb039371efdbfd 100644 (file)
@@ -29,6 +29,7 @@
     - osds
     - mdss
     - rgws
+    - nfss
     - rbdmirrors
     - clients
     - mgrs
index 42b684a2ce797db6c6dc1a2ef4592849f6db6227..de9234d218eb5ca95f3e1ea930ee05b95613e672 100644 (file)
@@ -26,6 +26,7 @@ class CallbackModule(CallbackBase):
             'installer_phase_ceph_osd',
             'installer_phase_ceph_mds',
             'installer_phase_ceph_rgw',
+            'installer_phase_ceph_nfs',
             'installer_phase_ceph_rbdmirror',
             'installer_phase_ceph_client',
             'installer_phase_ceph_rgw_loadbalancer',
@@ -58,6 +59,10 @@ class CallbackModule(CallbackBase):
                 'title': 'Install Ceph RGW',
                 'playbook': 'roles/ceph-rgw/tasks/main.yml'
             },
+            'installer_phase_ceph_nfs': {
+                'title': 'Install Ceph NFS',
+                'playbook': 'roles/ceph-nfs/tasks/main.yml'
+            },
             'installer_phase_ceph_rbdmirror': {
                 'title': 'Install Ceph RBD Mirror',
                 'playbook': 'roles/ceph-rbd-mirror/tasks/main.yml'
index 7488db717abf91209c1679857c55d0db8e13e295..1fdb3bbe5c5437a82cf8c19ca89d98a2f746e57c 100644 (file)
@@ -59,4 +59,5 @@
   ansible.builtin.include_tasks: selinux.yml
   when:
     - ansible_facts['os_family'] == 'RedHat'
-    - inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
+    - inventory_hostname in groups.get(nfs_group_name, [])
+      or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
index 22e2d3f99f7f31b36ccb82f8e5fdbcb6a6f11ae7..65459b58b4714edfbaccec38ffb7c282ad725d27 100644 (file)
@@ -17,5 +17,6 @@
       register: result
       until: result is succeeded
       when:
-        - inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
+        - inventory_hostname in groups.get(nfs_group_name, [])
+          or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
         - ansible_facts['distribution_major_version'] == '8'
index 4816ea7c209f33ffb00787876dadd1704e12edf1..f222e4ba38d9857fb3124b057f1141bb931ead6f 100644 (file)
@@ -46,6 +46,7 @@
       inventory_hostname in groups.get(rgw_group_name, []) or
       inventory_hostname in groups.get(mgr_group_name, []) or
       inventory_hostname in groups.get(rbdmirror_group_name, []) or
+      inventory_hostname in groups.get(nfs_group_name, []) or
       inventory_hostname in groups.get(monitoring_group_name, [])
   environment:
     HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
index 09904f9adcb18be912885f41ce3ee95b98a9d1a6..bf1a13141fd6e90559e7e1749b80db89dd4ec898 100644 (file)
@@ -44,6 +44,7 @@ mon_group_name: mons
 osd_group_name: osds
 rgw_group_name: rgws
 mds_group_name: mdss
+nfs_group_name: nfss
 rbdmirror_group_name: rbdmirrors
 client_group_name: clients
 mgr_group_name: mgrs
@@ -54,6 +55,7 @@ adopt_label_group_names:
   - "{{ osd_group_name }}"
   - "{{ rgw_group_name }}"
   - "{{ mds_group_name }}"
+  - "{{ nfs_group_name }}"
   - "{{ rbdmirror_group_name }}"
   - "{{ client_group_name }}"
   - "{{ mgr_group_name }}"
@@ -71,6 +73,7 @@ ceph_mgr_firewall_zone: public
 ceph_osd_firewall_zone: public
 ceph_rgw_firewall_zone: public
 ceph_mds_firewall_zone: public
+ceph_nfs_firewall_zone: public
 ceph_rbdmirror_firewall_zone: public
 ceph_dashboard_firewall_zone: public
 ceph_rgwloadbalancer_firewall_zone: public
@@ -147,6 +150,13 @@ ceph_stable_key: https://download.ceph.com/keys/release.asc
 ceph_stable_release: reef
 ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
 
+nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+centos_release_nfs: centos-release-nfs-ganesha4
+nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu
+nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
+nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
+libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu
+
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
@@ -181,6 +191,13 @@ ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{
 ceph_dev_branch: main # development branch you would like to use e.g: main, wip-hack
 ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
 
+nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
+# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous
+nfs_ganesha_flavor: "ceph_main"
+
+
 # REPOSITORY: CUSTOM
 #
 # Enabled when ceph_repository == 'custom'
@@ -396,6 +413,10 @@ handler_health_mds_check_delay: 10
 handler_health_rgw_check_retries: 5
 handler_health_rgw_check_delay: 10
 
+# NFS handler checks
+handler_health_nfs_check_retries: 5
+handler_health_nfs_check_delay: 10
+
 # RBD MIRROR handler checks
 handler_health_rbd_mirror_check_retries: 5
 handler_health_rbd_mirror_check_delay: 10
@@ -417,6 +438,24 @@ health_osd_check_delay: 10
 
 ceph_rbd_mirror_pool: "rbd"
 
+###############
+# NFS-GANESHA #
+###############
+#
+# Access type options
+#
+# Enable NFS File access
+# If set to true, then ganesha is set up to export the root of the
+# Ceph filesystem, and ganesha's attribute and directory caching is disabled
+# as much as possible since libcephfs clients also cache the same
+# information.
+#
+# Set this to true to enable File access via NFS.  Requires an MDS role.
+nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
+
+
 ###################
 # CONFIG OVERRIDE #
 ###################
index 440918c21f8da280fa59e371293f1ef9bae00150..8f068943fd7475f0f712abbaf12f7ce5218d2820 100644 (file)
@@ -13,6 +13,7 @@
         - "Restart ceph osds"
         - "Restart ceph mdss"
         - "Restart ceph rgws"
+        - "Restart ceph nfss"
         - "Restart ceph rbdmirrors"
         - "Restart ceph mgrs"
       register: tmpdirpath
       when: rgw_group_name in group_names
       listen: "Restart ceph rgws"
 
+    - name: Nfss handler
+      ansible.builtin.include_tasks: handler_nfss.yml
+      when: nfs_group_name in group_names
+      listen: "Restart ceph nfss"
+
     - name: Rbdmirrors handler
       ansible.builtin.include_tasks: handler_rbdmirrors.yml
       when: rbdmirror_group_name in group_names
index 2f6a40ff52e33d6114965e79ea618c52024c5a1b..e78b7bec8aaaba3752b9d7f0e9a43971a1ac456e 100644 (file)
   check_mode: false
   when: inventory_hostname in groups.get(rbdmirror_group_name, [])
 
+- name: Check for a nfs container
+  ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
+  register: ceph_nfs_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: false
+  when: inventory_hostname in groups.get(nfs_group_name, [])
+
 - name: Check for a ceph-crash container
   ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
   register: ceph_crash_container_stat
index ce390b1af1ba462fdda2566508c4488a7a6bc68b..96c492ffcc21ddd0931c4373c93c45ffec204546 100644 (file)
     - rbd_mirror_socket_stat.files | length > 0
     - item.1.rc == 1
 
+- name: Check for a nfs ganesha pid
+  ansible.builtin.command: "pgrep ganesha.nfsd"
+  register: nfs_process
+  changed_when: false
+  failed_when: false
+  check_mode: false
+  when: inventory_hostname in groups.get(nfs_group_name, [])
+
 - name: Check for a ceph-crash process
   ansible.builtin.command: pgrep ceph-crash
   changed_when: false
diff --git a/roles/ceph-handler/tasks/handler_nfss.yml b/roles/ceph-handler/tasks/handler_nfss.yml
new file mode 100644 (file)
index 0000000..dadfc1d
--- /dev/null
@@ -0,0 +1,28 @@
+---
+- name: Set _nfs_handler_called before restart
+  ansible.builtin.set_fact:
+    _nfs_handler_called: true
+
+- name: Copy nfs restart script
+  ansible.builtin.template:
+    src: restart_nfs_daemon.sh.j2
+    dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh"
+    owner: root
+    group: root
+    mode: "0750"
+  when: tmpdirpath.path is defined
+
+- name: Restart ceph nfs daemon(s)
+  ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh
+  when:
+    - hostvars[item]['handler_nfs_status'] | default(False) | bool
+    - hostvars[item]['_nfs_handler_called'] | default(False) | bool
+    - hostvars[item].tmpdirpath.path is defined
+  with_items: "{{ groups[nfs_group_name] }}"
+  delegate_to: "{{ item }}"
+  changed_when: false
+  run_once: true
+
+- name: Set _nfs_handler_called after restart
+  ansible.builtin.set_fact:
+    _nfs_handler_called: false
index 776fed73a3f849fbeaafdc0459fbeaba991452d7..c963b0115f8cc6d190fd384e84aaceeac6eb6883 100644 (file)
     handler_rgw_status: "{{ 0 in (rgw_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rgw_container_stat.get('rc') == 0 and ceph_rgw_container_stat.get('stdout_lines', []) | length != 0) }}"
   when: inventory_hostname in groups.get(rgw_group_name, [])
 
+- name: Set_fact handler_nfs_status
+  ansible.builtin.set_fact:
+    handler_nfs_status: "{{ (nfs_process.get('rc') == 0) if not containerized_deployment | bool else (ceph_nfs_container_stat.get('rc') == 0 and ceph_nfs_container_stat.get('stdout_lines', []) | length != 0) }}"
+  when: inventory_hostname in groups.get(nfs_group_name, [])
+
 - name: Set_fact handler_rbd_status
   ansible.builtin.set_fact:
     handler_rbd_mirror_status: "{{ 0 in (rbd_mirror_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rbd_mirror_container_stat.get('rc') == 0 and ceph_rbd_mirror_container_stat.get('stdout_lines', []) | length != 0) }}"
diff --git a/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2 b/roles/ceph-handler/templates/restart_nfs_daemon.sh.j2
new file mode 100644 (file)
index 0000000..c1571ba
--- /dev/null
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+RETRIES="{{ handler_health_nfs_check_retries }}"
+DELAY="{{ handler_health_nfs_check_delay }}"
+NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
+PID=/var/run/ganesha/ganesha.pid
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
+{% endif %}
+
+# First, restart the daemon
+{% if containerized_deployment | bool -%}
+systemctl restart $NFS_NAME
+# Wait and ensure the pid exists after restarting the daemon
+while [ $RETRIES -ne 0 ]; do
+  $DOCKER_EXEC test -f $PID && exit 0
+  sleep $DELAY
+  let RETRIES=RETRIES-1
+done
+# If we reach this point, it means the pid is not present.
+echo "PID file ${PID} could not be found, which means Ganesha is not running. Showing $NFS_NAME unit logs now:"
+journalctl -u $NFS_NAME
+exit 1
+{% else %}
+systemctl restart nfs-ganesha
+{% endif %}
index f40ccc8caab6f60f841c63eac8b55dc268f8a862..a47986bd57e5a07a57f0bfa53642372c72d69498 100644 (file)
         - mds_group_name is defined
         - mds_group_name in group_names
 
+    - name: Open ceph networks on nfs
+      ansible.posix.firewalld:
+        zone: "{{ ceph_nfs_firewall_zone }}"
+        source: "{{ item }}"
+        permanent: true
+        immediate: true
+        state: enabled
+      with_items: "{{ public_network.split(',') }}"
+      when:
+        - nfs_group_name is defined
+        - nfs_group_name in group_names
+
+    - name: Open nfs ports
+      ansible.posix.firewalld:
+        service: nfs
+        zone: "{{ ceph_nfs_firewall_zone }}"
+        permanent: true
+        immediate: true
+        state: enabled
+      when:
+        - nfs_group_name is defined
+        - nfs_group_name in group_names
+
+    - name: Open nfs ports (portmapper)
+      ansible.posix.firewalld:
+        port: "111/tcp"
+        zone: "{{ ceph_nfs_firewall_zone }}"
+        permanent: true
+        immediate: true
+        state: enabled
+      when:
+        - nfs_group_name is defined
+        - nfs_group_name in group_names
+
     - name: Open ceph networks on rbdmirror
       ansible.posix.firewalld:
         zone: "{{ ceph_rbdmirror_firewall_zone }}"
diff --git a/roles/ceph-nfs/LICENSE b/roles/ceph-nfs/LICENSE
new file mode 100644 (file)
index 0000000..4953f91
--- /dev/null
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [2016] [Red Hat, Inc.]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/roles/ceph-nfs/README.md b/roles/ceph-nfs/README.md
new file mode 100644 (file)
index 0000000..b58db56
--- /dev/null
@@ -0,0 +1,3 @@
+# Ansible role: ceph-nfs
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml
new file mode 100644 (file)
index 0000000..5cfbe22
--- /dev/null
@@ -0,0 +1,122 @@
+---
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# Even though NFS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on NFS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+copy_admin_key: false
+
+# Whether docker container or systemd service should be enabled
+# and started, it's useful to set it to false if nfs-ganesha
+# service is managed by pacemaker
+ceph_nfs_enable_service: true
+
+# ceph-nfs systemd service uses ansible's hostname as an instance id,
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
+# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
+# such case it's better to have constant instance id instead which
+# can be set by 'ceph_nfs_service_suffix'
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
+
+######################
+# NFS Ganesha Config #
+######################
+ceph_nfs_log_file: "/var/log/ganesha/ganesha.log"
+ceph_nfs_dynamic_exports: false
+# If set to true then rados is used to store ganesha exports
+# and client sessions information, this is useful if you
+# run multiple nfs-ganesha servers in active/passive mode and
+# want to do failover
+ceph_nfs_rados_backend: false
+# Name of the rados object used to store a list of the export rados
+# object URLS
+ceph_nfs_rados_export_index: "ganesha-export-index"
+# Address ganesha service should listen on, by default ganesha listens on all
+# addresses. (Note: ganesha ignores this parameter in current version due to
+# this bug: https://github.com/nfs-ganesha/nfs-ganesha/issues/217)
+# ceph_nfs_bind_addr: 0.0.0.0
+
+# If set to true, then ganesha's attribute and directory caching is disabled
+# as much as possible. Currently, ganesha caches by default.
+# When using ganesha as CephFS's gateway, it is recommended to turn off
+# ganesha's caching as the libcephfs clients also cache the same information.
+# Note: Irrespective of this option's setting, ganesha's caching is disabled
+# when setting 'nfs_file_gw' option as true.
+ceph_nfs_disable_caching: false
+
+# This is the file ganesha will use to control NFSv4 ID mapping
+ceph_nfs_idmap_conf: "/etc/ganesha/idmap.conf"
+
+# idmap configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# Example:
+# idmap_conf_overrides:
+#   General:
+#     Domain: foo.domain.net
+idmap_conf_overrides: {}
+
+####################
+# FSAL Ceph Config #
+####################
+ceph_nfs_ceph_export_id: 20133
+ceph_nfs_ceph_pseudo_path: "/cephfile"
+ceph_nfs_ceph_protocols: "3,4"
+ceph_nfs_ceph_access_type: "RW"
+ceph_nfs_ceph_user: "admin"
+ceph_nfs_ceph_squash: "Root_Squash"
+ceph_nfs_ceph_sectype: "sys,krb5,krb5i,krb5p"
+
+###################
+# FSAL RGW Config #
+###################
+ceph_nfs_rgw_export_id: 20134
+ceph_nfs_rgw_pseudo_path: "/cephobject"
+ceph_nfs_rgw_protocols: "3,4"
+ceph_nfs_rgw_access_type: "RW"
+ceph_nfs_rgw_user: "cephnfs"
+ceph_nfs_rgw_squash: "Root_Squash"
+ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
+# Note: keys are optional and can be generated, but not on containerized, where
+# they must be configured.
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
+# These multiline strings will be appended to the contents of the blocks in ganesha.conf and
+# must be in the correct ganesha.conf format seen here:
+# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
+#
+# Example:
+# CACHEINODE {
+         # Entries_HWMark = 100000;
+# }
+#
+# ganesha_core_param_overrides:
+# ganesha_ceph_export_overrides:
+# ganesha_rgw_export_overrides:
+# ganesha_rgw_section_overrides:
+# ganesha_log_overrides:
+# ganesha_conf_overrides: |
+#     CACHEINODE {
+             # Entries_HWMark = 100000;
+#     }
+
+##########
+# DOCKER #
+##########
+
+ceph_docker_image: "ceph/daemon"
+ceph_docker_image_tag: latest
+ceph_nfs_docker_extra_env:
+ceph_config_keys: [] # DON'T TOUCH ME
diff --git a/roles/ceph-nfs/meta/main.yml b/roles/ceph-nfs/meta/main.yml
new file mode 100644 (file)
index 0000000..53a6746
--- /dev/null
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+  company: Red Hat
+  author: Daniel Gryniewicz
+  description: Installs Ceph NFS Gateway
+  license: Apache
+  min_ansible_version: '2.7'
+  platforms:
+    - name: EL
+      versions:
+        - 'all'
+  galaxy_tags:
+    - system
+dependencies: []
diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
new file mode 100644 (file)
index 0000000..587e3b2
--- /dev/null
@@ -0,0 +1,23 @@
+---
+- name: Create rgw nfs user "{{ ceph_nfs_rgw_user }}"
+  radosgw_user:
+    name: "{{ ceph_nfs_rgw_user }}"
+    cluster: "{{ cluster }}"
+    display_name: "RGW NFS User"
+    access_key: "{{ ceph_nfs_rgw_access_key | default(omit) }}"
+    secret_key: "{{ ceph_nfs_rgw_secret_key | default(omit) }}"
+  run_once: true
+  register: rgw_nfs_user
+  changed_when: false
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when: nfs_obj_gw | bool
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+- name: Set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key
+  ansible.builtin.set_fact:
+    ceph_nfs_rgw_access_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['access_key'] }}"
+    ceph_nfs_rgw_secret_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['secret_key'] }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when: nfs_obj_gw | bool
diff --git a/roles/ceph-nfs/tasks/main.yml b/roles/ceph-nfs/tasks/main.yml
new file mode 100644 (file)
index 0000000..acec885
--- /dev/null
@@ -0,0 +1,96 @@
+---
+# global/common requirement
+- name: Stop nfs server service
+  ansible.builtin.systemd:
+    name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
+    state: stopped
+    enabled: false
+  failed_when: false
+
+- name: Include pre_requisite_non_container.yml
+  ansible.builtin.include_tasks: pre_requisite_non_container.yml
+  when: not containerized_deployment | bool
+
+- name: Include pre_requisite_container.yml
+  ansible.builtin.include_tasks: pre_requisite_container.yml
+  when: containerized_deployment | bool
+
+- name: Set_fact _rgw_hostname
+  ansible.builtin.set_fact:
+    _rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}"
+
+- name: Set rgw parameter (log file)
+  ceph_config:
+    action: set
+    who: "client.rgw.{{ _rgw_hostname }}"
+    option: "log file"
+    value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}.log"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ groups.get('nfss', []) }}"
+
+- name: Include create_rgw_nfs_user.yml
+  ansible.builtin.import_tasks: create_rgw_nfs_user.yml
+  when: groups.get(mon_group_name, []) | length > 0
+
+- name: Install nfs-ganesha-selinux on RHEL 8
+  ansible.builtin.package:
+    name: nfs-ganesha-selinux
+    state: present
+  register: result
+  until: result is succeeded
+  when:
+    - not containerized_deployment | bool
+    - inventory_hostname in groups.get(nfs_group_name, [])
+    - ansible_facts['os_family'] == 'RedHat'
+    - ansible_facts['distribution_major_version'] == '8'
+
+# NOTE (leseb): workaround for issues with ganesha and librgw
+- name: Add ganesha_t to permissive domain
+  community.general.selinux_permissive:
+    name: ganesha_t
+    permissive: true
+  failed_when: false
+  when:
+    - not containerized_deployment | bool
+    - ansible_facts['os_family'] == 'RedHat'
+    - ansible_facts['selinux']['status'] == 'enabled'
+
+- name: Nfs with external ceph cluster task related
+  when:
+    - groups.get(mon_group_name, []) | length == 0
+    - ceph_nfs_ceph_user is defined
+  block:
+    - name: Create keyring directory
+      ansible.builtin.file:
+        path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ item }}"
+        state: directory
+        owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+        group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+        mode: "0755"
+      with_items:
+        - "{{ ceph_nfs_ceph_user }}"
+        - "{{ ansible_facts['hostname'] }}"
+
+    - name: Set_fact rgw_client_name
+      ansible.builtin.set_fact:
+        rgw_client_name: "client.rgw.{{ ceph_nfs_ceph_user }}"
+
+    - name: Get client cephx keys
+      ansible.builtin.copy:
+        dest: "{{ item.1 }}"
+        content: "{{ item.0.content | b64decode }}"
+        mode: "{{ item.0.item.get('mode', '0600') }}"
+        owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+        group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+      with_nested:
+        - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}"
+        - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"]
+      when:
+        - not item.0.get('skipped', False)
+        - item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name
+      no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+- name: Include start_nfs.yml
+  ansible.builtin.import_tasks: start_nfs.yml
diff --git a/roles/ceph-nfs/tasks/pre_requisite_container.yml b/roles/ceph-nfs/tasks/pre_requisite_container.yml
new file mode 100644 (file)
index 0000000..023c8d0
--- /dev/null
@@ -0,0 +1,108 @@
+---
+- name: Keyring related tasks
+  when: groups.get(mon_group_name, []) | length > 0
+  block:
+    - name: Set_fact container_exec_cmd
+      ansible.builtin.set_fact:
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
+      with_items: "{{ groups.get(mon_group_name, []) }}"
+      delegate_to: "{{ item }}"
+      delegate_facts: true
+      run_once: true
+
+    - name: Create directories
+      ansible.builtin.file:
+        path: "{{ item.0 }}"
+        state: "directory"
+        owner: "{{ ceph_uid }}"
+        group: "{{ ceph_uid }}"
+        mode: "0755"
+      delegate_to: "{{ item.1 }}"
+      with_nested:
+        - ["/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}",
+           "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}"]
+        - ["{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}"]
+
+    - name: Set_fact keyrings_list
+      ansible.builtin.set_fact:
+        keyrings_list:
+          - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
+          - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+          - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} }
+          - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "r", "osd": "allow rw pool=.nfs"} }
+
+    - name: Create keyrings from a monitor
+      ceph_key:
+        name: "{{ item.name }}"
+        cluster: "{{ cluster }}"
+        dest: "{{ item.path }}"
+        caps: "{{ item.caps }}"
+        import_key: true
+        owner: "{{ ceph_uid }}"
+        group: "{{ ceph_uid }}"
+        mode: "0600"
+      no_log: "{{ no_log_on_ceph_key_tasks }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+      loop: "{{ keyrings_list }}"
+      when:
+        - cephx | bool
+        - item.create | default(False) | bool
+
+    - name: Get keys from monitors
+      ceph_key:
+        name: "{{ item.name }}"
+        cluster: "{{ cluster }}"
+        output_format: plain
+        state: info
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      register: _rgw_keys
+      loop: "{{ keyrings_list }}"
+      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+      run_once: true
+      when:
+        - cephx | bool
+        - item.copy_key | default(True) | bool
+      no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+    - name: Debug
+      ansible.builtin.debug:
+        msg: "{{ _rgw_keys }}"
+
+    - name: Copy ceph key(s) if needed
+      ansible.builtin.copy:
+        dest: "{{ item.item.path }}"
+        content: "{{ item.stdout + '\n' }}"
+        owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+        group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+        mode: "{{ ceph_keyring_permissions }}"
+      with_items: "{{ _rgw_keys.results }}"
+      when:
+        - cephx | bool
+        - item.item.copy_key | default(True) | bool
+      no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+    - name: Dbus related tasks
+      when: ceph_nfs_dynamic_exports | bool
+      block:
+        - name: Get file
+          ansible.builtin.command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf"
+          register: dbus_ganesha_file
+          run_once: true
+          changed_when: false
+
+        - name: Create dbus service file
+          ansible.builtin.copy:
+            content: "{{ dbus_ganesha_file.stdout }}"
+            dest: /etc/dbus-1/system.d/org.ganesha.nfsd.conf
+            owner: "root"
+            group: "root"
+            mode: "0644"
+
+        - name: Reload dbus configuration
+          ansible.builtin.command: "killall -SIGHUP dbus-daemon"
+          changed_when: false
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml
new file mode 100644 (file)
index 0000000..a13654c
--- /dev/null
@@ -0,0 +1,96 @@
+---
+- name: Include red hat based system related tasks
+  ansible.builtin.include_tasks: pre_requisite_non_container_red_hat.yml
+  when: ansible_facts['os_family'] == 'RedHat'
+
+- name: Include debian based system related tasks
+  ansible.builtin.include_tasks: pre_requisite_non_container_debian.yml
+  when: ansible_facts['os_family'] == 'Debian'
+
+- name: Install nfs rgw/cephfs gateway - SUSE/openSUSE
+  community.general.zypper:
+    name: "{{ item.name }}"
+    disable_gpg_check: true
+  with_items:
+    - { name: 'nfs-ganesha-rgw', install: "{{ nfs_obj_gw }}" }
+    - { name: 'radosgw', install: "{{ nfs_obj_gw }}" }
+    - { name: 'nfs-ganesha-ceph', install: "{{ nfs_file_gw }}" }
+  when:
+    - (ceph_origin == 'repository' or ceph_origin == 'distro')
+    - ansible_facts['os_family'] == 'Suse'
+    - item.install | bool
+  register: result
+  until: result is succeeded
+
+# NOTE (leseb): we use root:ceph for permissions since ganesha
+# does not have the right selinux context to read ceph directories.
+- name: Create rados gateway and ganesha directories
+  ansible.builtin.file:
+    path: "{{ item.name }}"
+    state: directory
+    owner: "{{ item.owner | default('ceph') }}"
+    group: "{{ item.group | default('ceph') }}"
+    mode: "{{ ceph_directories_mode }}"
+  with_items:
+    - { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" }
+    - { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" }
+    - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" }
+    - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
+    - { name: "/var/log/ceph", create: true }
+    - { name: "/var/log/ganesha", create: true, owner: root, group: root }
+    - { name: "/var/run/ceph", create: true }
+  when: item.create | bool
+
+- name: Cephx related tasks
+  when:
+    - cephx | bool
+    - groups.get(mon_group_name, []) | length > 0
+  block:
+    - name: Get keys from monitors
+      ceph_key:
+        name: "{{ item.name }}"
+        cluster: "{{ cluster }}"
+        output_format: plain
+        state: info
+      register: _rgw_keys
+      with_items:
+        - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
+        - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
+      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
+      run_once: true
+      when:
+        - cephx | bool
+        - item.copy_key | bool
+      no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+    - name: Copy ceph key(s) if needed
+      ansible.builtin.copy:
+        dest: "{{ item.item.path }}"
+        content: "{{ item.stdout + '\n' }}"
+        owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+        group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+        mode: "{{ ceph_keyring_permissions }}"
+      with_items: "{{ _rgw_keys.results }}"
+      when:
+        - cephx | bool
+        - item.item.copy_key | bool
+      no_log: "{{ no_log_on_ceph_key_tasks }}"
+
+    - name: Nfs object gateway related tasks
+      when: nfs_obj_gw | bool
+      block:
+        - name: Create rados gateway keyring
+          ceph_key:
+            name: "client.rgw.{{ ansible_facts['hostname'] }}"
+            cluster: "{{ cluster }}"
+            user: client.bootstrap-rgw
+            user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
+            caps:
+              mon: "allow rw"
+              osd: "allow rwx"
+            dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"
+            import_key: false
+            owner: ceph
+            group: ceph
+            mode: "{{ ceph_keyring_permissions }}"
+          no_log: "{{ no_log_on_ceph_key_tasks }}"
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml
new file mode 100644 (file)
index 0000000..b0848f8
--- /dev/null
@@ -0,0 +1,80 @@
+---
+- name: Debian based systems - repo handling
+  when: ceph_origin == 'repository'
+  block:
+    - name: Stable repos specific tasks
+      when:
+        - nfs_ganesha_stable | bool
+        - ceph_repository == 'community'
+      block:
+        - name: Add nfs-ganesha stable repository
+          ansible.builtin.apt_repository:
+            repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
+            state: present
+            update_cache: false
+          register: add_ganesha_apt_repo
+
+        - name: Add libntirpc stable repository
+          ansible.builtin.apt_repository:
+            repo: "deb {{ libntirpc_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
+            state: present
+            update_cache: false
+          register: add_libntirpc_apt_repo
+          when: libntirpc_stable_deb_repo is defined
+
+        - name: Add nfs-ganesha ppa apt key
+          ansible.builtin.apt_key:
+            keyserver: "{{ nfs_ganesha_apt_keyserver }}"
+            id: "{{ nfs_ganesha_apt_key_id }}"
+          when:
+            - nfs_ganesha_apt_key_id is defined
+            - nfs_ganesha_apt_keyserver is defined
+
+        - name: Update apt cache
+          ansible.builtin.apt:
+            update_cache: true
+          register: update_ganesha_apt_cache
+          retries: 5
+          delay: 2
+          until: update_ganesha_apt_cache is success
+          when: add_ganesha_apt_repo is changed or add_libntirpc_apt_repo is changed
+
+    - name: Debian based systems - dev repos specific tasks
+      when:
+        - nfs_ganesha_dev | bool
+        - ceph_repository == 'dev'
+      block:
+        - name: Fetch nfs-ganesha development repository
+          ansible.builtin.uri:
+            url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
+            return_content: true
+          register: nfs_ganesha_dev_apt_repo
+
+        - name: Add nfs-ganesha development repository
+          ansible.builtin.copy:
+            content: "{{ nfs_ganesha_dev_apt_repo.content }}"
+            dest: /etc/apt/sources.list.d/nfs-ganesha-dev.list
+            owner: root
+            group: root
+            backup: true
+            mode: "0644"
+
+- name: Debian based systems - install required packages
+  block:
+    - name: Debian based systems
+      when: ceph_origin == 'repository' or ceph_origin == 'distro'
+      block:
+        - name: Install nfs rgw/cephfs gateway - debian
+          ansible.builtin.apt:
+            name: ['nfs-ganesha-rgw', 'radosgw']
+            allow_unauthenticated: true
+          register: result
+          until: result is succeeded
+          when: nfs_obj_gw | bool
+        - name: Install nfs cephfs gateway - debian
+          ansible.builtin.apt:
+            name: nfs-ganesha-ceph
+            allow_unauthenticated: true
+          register: result
+          until: result is succeeded
+          when: nfs_file_gw | bool
diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml
new file mode 100644 (file)
index 0000000..92a4448
--- /dev/null
@@ -0,0 +1,43 @@
+---
+- name: Red hat based systems - repo handling
+  when: ceph_origin == 'repository'
+  block:
+    - name: Red hat based systems - stable repo related tasks
+      when:
+        - nfs_ganesha_stable | bool
+        - ceph_repository == 'community'
+      block:
+        - name: Add nfs-ganesha stable repository
+          ansible.builtin.package:
+            name: "{{ centos_release_nfs }}"
+            state: present
+
+    - name: Red hat based systems - dev repo related tasks
+      when:
+        - nfs_ganesha_dev | bool
+        - ceph_repository == 'dev'
+      block:
+        - name: Add nfs-ganesha dev repo
+          ansible.builtin.get_url:
+            url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
+            dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
+            mode: "0644"
+            force: true
+
+- name: Red hat based systems - install nfs packages
+  block:
+    - name: Install nfs cephfs gateway
+      ansible.builtin.package:
+        name: ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace']
+        state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
+      register: result
+      until: result is succeeded
+      when: nfs_file_gw | bool
+
+    - name: Install redhat nfs-ganesha-rgw and ceph-radosgw packages
+      ansible.builtin.package:
+        name: ['nfs-ganesha-rgw', 'nfs-ganesha-rados-grace', 'nfs-ganesha-rados-urls', 'ceph-radosgw']
+        state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
+      register: result
+      until: result is succeeded
+      when: nfs_obj_gw | bool
diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml
new file mode 100644 (file)
index 0000000..45e7a26
--- /dev/null
@@ -0,0 +1,105 @@
+---
+- name: Nfs various pre-requisites tasks
+  block:
+    - name: Set_fact exec_cmd_nfs - external
+      ansible.builtin.set_fact:
+        exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring"
+        delegate_node: "{{ inventory_hostname }}"
+      when: groups.get(mon_group_name, []) | length == 0
+
+    - name: Set_fact exec_cmd_nfs - internal
+      ansible.builtin.set_fact:
+        exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
+        delegate_node: "{{ groups[mon_group_name][0] }}"
+      when: groups.get(mon_group_name, []) | length > 0
+
+    - name: Check if rados index object exists
+      ansible.builtin.shell: "set -o pipefail && {{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls | grep {{ ceph_nfs_rados_export_index }}"
+      changed_when: false
+      failed_when: false
+      register: rados_index_exists
+      check_mode: false
+      when: ceph_nfs_rados_backend | bool
+      delegate_to: "{{ delegate_node }}"
+      run_once: true
+
+    - name: Create an empty rados index object
+      ansible.builtin.command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
+      when:
+        - ceph_nfs_rados_backend | bool
+        - rados_index_exists.rc != 0
+      delegate_to: "{{ delegate_node }}"
+      changed_when: false
+      run_once: true
+
+- name: Create /etc/ganesha
+  ansible.builtin.file:
+    path: /etc/ganesha
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+
+- name: Generate ganesha configuration file
+  ansible.builtin.template:
+    src: "ganesha.conf.j2"
+    dest: /etc/ganesha/ganesha.conf
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify: Restart ceph nfss
+
+- name: Generate ganesha idmap.conf file
+  openstack.config_template.config_template:
+    src: "idmap.conf.j2"
+    dest: "{{ ceph_nfs_idmap_conf }}"
+    owner: "root"
+    group: "root"
+    mode: "0644"
+    config_overrides: "{{ idmap_conf_overrides }}"
+    config_type: ini
+  notify: Restart ceph nfss
+
+- name: Create exports directory
+  ansible.builtin.file:
+    path: /etc/ganesha/export.d
+    state: directory
+    owner: "root"
+    group: "root"
+    mode: "0755"
+  when: ceph_nfs_dynamic_exports | bool
+
+- name: Create exports dir index file
+  ansible.builtin.copy:
+    content: ""
+    force: false
+    dest: /etc/ganesha/export.d/INDEX.conf
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  when: ceph_nfs_dynamic_exports | bool
+
+- name: Include_tasks systemd.yml
+  ansible.builtin.include_tasks: systemd.yml
+  when: containerized_deployment | bool
+
+- name: Systemd start nfs container
+  ansible.builtin.systemd:
+    name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
+    state: started
+    enabled: true
+    masked: false
+    daemon_reload: true
+  when:
+    - containerized_deployment | bool
+    - ceph_nfs_enable_service | bool
+
+- name: Start nfs gateway service
+  ansible.builtin.systemd:
+    name: nfs-ganesha
+    state: started
+    enabled: true
+    masked: false
+  when:
+    - not containerized_deployment | bool
+    - ceph_nfs_enable_service | bool
diff --git a/roles/ceph-nfs/tasks/systemd.yml b/roles/ceph-nfs/tasks/systemd.yml
new file mode 100644 (file)
index 0000000..1534cf4
--- /dev/null
@@ -0,0 +1,9 @@
+---
+- name: Generate systemd unit file
+  ansible.builtin.template:
+    src: "{{ role_path }}/templates/ceph-nfs.service.j2"
+    dest: /etc/systemd/system/ceph-nfs@.service
+    owner: "root"
+    group: "root"
+    mode: "0644"
+  notify: Restart ceph nfss
diff --git a/roles/ceph-nfs/templates/ceph-nfs.service.j2 b/roles/ceph-nfs/templates/ceph-nfs.service.j2
new file mode 100644 (file)
index 0000000..663faed
--- /dev/null
@@ -0,0 +1,56 @@
+[Unit]
+Description=NFS-Ganesha file server
+Documentation=http://github.com/nfs-ganesha/nfs-ganesha/wiki
+{% if container_binary == 'docker' %}
+After=docker.service network-online.target local-fs.target time-sync.target
+Requires=docker.service
+{% else %}
+After=network-online.target local-fs.target time-sync.target
+{% endif %}
+Wants=network-online.target local-fs.target time-sync.target
+
+[Service]
+EnvironmentFile=-/etc/environment
+{% if container_binary == 'podman' %}
+ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i
+ExecStartPre=-/usr/bin/mkdir -p /var/log/ceph /var/log/ganesha
+{% endif %}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
+ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+  -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+-v /etc/ceph:/etc/ceph:z \
+-v /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring:z \
+-v /var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring:/etc/ceph/keyring:z \
+-v /etc/ganesha:/etc/ganesha:z \
+-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
+-v /var/run/ceph:/var/run/ceph:z \
+-v /var/log/ceph:/var/log/ceph:z \
+-v /var/log/ganesha:/var/log/ganesha:z \
+-v /etc/localtime:/etc/localtime:ro \
+{{ ceph_nfs_docker_extra_env }} \
+--entrypoint=/usr/bin/ganesha.nfsd \
+--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
+{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+-F -L STDOUT
+{% if container_binary == 'podman' %}
+ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
+{% else %}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i
+{% endif %}
+KillMode=none
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+{% if container_binary == 'podman' %}
+Type=forking
+PIDFile=/%t/%n-pid
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/ceph-nfs/templates/ganesha.conf.j2 b/roles/ceph-nfs/templates/ganesha.conf.j2
new file mode 100644 (file)
index 0000000..7e6fab6
--- /dev/null
@@ -0,0 +1,124 @@
+#jinja2: trim_blocks: "true", lstrip_blocks: "true"
+# {{ ansible_managed }}
+
+{% if ceph_nfs_dynamic_exports | bool and not ceph_nfs_rados_backend | bool %}
+%include /etc/ganesha/export.d/INDEX.conf
+{% endif %}
+
+NFS_Core_Param
+{
+{% if ceph_nfs_bind_addr is defined %}
+       Bind_Addr={{ ceph_nfs_bind_addr }};
+{% endif %}
+{{ ganesha_core_param_overrides | default(None) }}
+}
+
+{% if ceph_nfs_disable_caching | bool or nfs_file_gw | bool %}
+EXPORT_DEFAULTS {
+       Attr_Expiration_Time = 0;
+}
+
+CACHEINODE {
+       Dir_Chunk = 0;
+
+       NParts = 1;
+       Cache_Size = 1;
+}
+{% endif %}
+
+{% if ceph_nfs_rados_backend | bool %}
+RADOS_URLS {
+   ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+   userid = "{{ ceph_nfs_ceph_user }}";
+}
+%url rados://{{ cephfs_data_pool.name }}/{{ ceph_nfs_rados_export_index }}
+
+NFSv4 {
+       RecoveryBackend = 'rados_kv';
+       IdmapConf = "{{ ceph_nfs_idmap_conf }}";
+}
+RADOS_KV {
+       ceph_conf = '/etc/ceph/{{ cluster }}.conf';
+       userid = "{{ ceph_nfs_ceph_user }}";
+       pool = "{{ cephfs_data_pool.name }}";
+}
+{% endif %}
+
+{% if nfs_file_gw | bool %}
+EXPORT
+{
+       Export_id={{ ceph_nfs_ceph_export_id }};
+
+       Path = "/";
+
+       Pseudo = {{ ceph_nfs_ceph_pseudo_path }};
+
+       Access_Type = {{ ceph_nfs_ceph_access_type }};
+
+       Protocols = {{ ceph_nfs_ceph_protocols }};
+
+       Transports = TCP;
+
+       SecType = {{ ceph_nfs_ceph_sectype }};
+
+       Squash = {{ ceph_nfs_ceph_squash }};
+
+       Attr_Expiration_Time = 0;
+
+       FSAL {
+               Name = CEPH;
+               User_Id = "{{ ceph_nfs_ceph_user }}";
+       }
+
+        {{ ganesha_ceph_export_overrides | default(None) }}
+}
+{% endif %}
+{% if nfs_obj_gw | bool %}
+EXPORT
+{
+       Export_id={{ ceph_nfs_rgw_export_id }};
+
+       Path = "/";
+
+       Pseudo = {{ ceph_nfs_rgw_pseudo_path }};
+
+       Access_Type = {{ ceph_nfs_rgw_access_type }};
+
+       Protocols = {{ ceph_nfs_rgw_protocols }};
+
+       Transports = TCP;
+
+       SecType = {{ ceph_nfs_rgw_sectype }};
+
+       Squash = {{ ceph_nfs_rgw_squash }};
+
+       FSAL {
+               Name = RGW;
+               User_Id = "{{ ceph_nfs_rgw_user }}";
+               Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}";
+               Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}";
+       }
+
+        {{ ganesha_rgw_export_overrides | default(None) }}
+
+}
+
+RGW {
+        ceph_conf = "/etc/ceph/{{ cluster }}.conf";
+        cluster = "{{ cluster }}";
+        name = "{{ rgw_client_name }}";
+        {{ ganesha_rgw_section_overrides | default(None) }}
+}
+{% endif %}
+
+LOG {
+        Facility {
+                name = FILE;
+                destination = "{{ ceph_nfs_log_file }}";
+                enable = active;
+        }
+
+        {{ ganesha_log_overrides | default(None) }}
+}
+
+{{ ganesha_conf_overrides | default(None) }}
diff --git a/roles/ceph-nfs/templates/idmap.conf.j2 b/roles/ceph-nfs/templates/idmap.conf.j2
new file mode 100644 (file)
index 0000000..d052232
--- /dev/null
@@ -0,0 +1,137 @@
+[General]
+#Verbosity = 0
+# The following should be set to the local NFSv4 domain name
+# The default is the host's DNS domain name.
+#Domain = local.domain.edu
+
+# In multi-domain environments, some NFS servers will append the identity
+# management domain to the owner and owner_group in lieu of a true NFSv4
+# domain.  This option can facilitate lookups in such environments.  If
+# set to a value other than "none", the nsswitch  plugin will first pass
+# the name to the password/group lookup function without stripping the
+# domain off.  If that mapping fails then the plugin will try again using
+# the old method (comparing the domain in the string to the Domain value,
+# stripping it if it matches, and passing the resulting short name to the
+# lookup function).  Valid values are "user", "group", "both", and
+# "none".  The default is "none".
+#No-Strip = none
+
+# Winbind has a quirk whereby doing a group lookup in UPN format
+# (e.g. staff@americas.example.com) will cause the group to be
+# displayed prefixed with the full domain in uppercase
+# (e.g. AMERICAS.EXAMPLE.COM\staff) instead of in the familiar netbios
+# name format (e.g. AMERICAS\staff).  Setting this option to true
+# causes the name to be reformatted before passing it to the group
+# lookup function in order to work around this.  This setting is
+# ignored unless No-Strip is set to either "both" or "group".
+# The default is "false".
+#Reformat-Group = false
+
+# The following is a comma-separated list of Kerberos realm
+# names that should be considered to be equivalent to the
+# local realm, such that <user>@REALM.A can be assumed to
+# be the same user as <user>@REALM.B
+# If not specified, the default local realm is the domain name,
+# which defaults to the host's DNS domain name,
+# translated to upper-case.
+# Note that if this value is specified, the local realm name
+# must be included in the list!
+#Local-Realms = 
+
+[Mapping]
+
+#Nobody-User = nobody
+#Nobody-Group = nobody
+
+[Translation]
+
+# Translation Method is an comma-separated, ordered list of
+# translation methods that can be used.  Distributed methods
+# include "nsswitch", "umich_ldap", and "static".  Each method
+# is a dynamically loadable plugin library.
+# New methods may be defined and inserted in the list.
+# The default is "nsswitch".
+#Method = nsswitch
+
+# Optional.  This is a comma-separated, ordered list of
+# translation methods to be used for translating GSS
+# authenticated names to ids.
+# If this option is omitted, the same methods as those
+# specified in "Method" are used.
+#GSS-Methods = <alternate method list for translating GSS names>
+
+#-------------------------------------------------------------------#
+# The following are used only for the "static" Translation Method.
+#-------------------------------------------------------------------#
+[Static]
+
+# A "static" list of GSS-Authenticated names to
+# local user name mappings
+
+#someuser@REALM = localuser
+
+
+#-------------------------------------------------------------------#
+# The following are used only for the "umich_ldap" Translation Method.
+#-------------------------------------------------------------------#
+
+[UMICH_SCHEMA]
+
+# server information (REQUIRED)
+LDAP_server = ldap-server.local.domain.edu
+
+# the default search base (REQUIRED)
+LDAP_base = dc=local,dc=domain,dc=edu
+
+#-----------------------------------------------------------#
+# The remaining options have defaults (as shown)
+# and are therefore not required.
+#-----------------------------------------------------------#
+
+# whether or not to perform canonicalization on the
+# name given as LDAP_server
+#LDAP_canonicalize_name = true
+
+# absolute search base for (people) accounts
+#LDAP_people_base = <LDAP_base>
+
+# absolute search base for groups
+#LDAP_group_base = <LDAP_base>
+
+# Set to true to enable SSL - anything else is not enabled
+#LDAP_use_ssl = false
+
+# You must specify a CA certificate location if you enable SSL
+#LDAP_ca_cert = /etc/ldapca.cert
+
+# Objectclass mapping information
+
+# Mapping for the person (account) object class
+#NFSv4_person_objectclass = NFSv4RemotePerson
+
+# Mapping for the nfsv4name attribute the person object
+#NFSv4_name_attr = NFSv4Name
+
+# Mapping for the UID number
+#NFSv4_uid_attr = UIDNumber
+
+# Mapping for the GSSAPI Principal name
+#GSS_principal_attr = GSSAuthName
+
+# Mapping for the account name attribute (usually uid)
+# The value for this attribute must match the value of 
+# the group member attribute - NFSv4_member_attr
+#NFSv4_acctname_attr = uid
+
+# Mapping for the group object class
+#NFSv4_group_objectclass = NFSv4RemoteGroup
+
+# Mapping for the GID attribute
+#NFSv4_gid_attr = GIDNumber
+
+# Mapping for the Group NFSv4 name
+#NFSv4_group_attr = NFSv4Name
+
+# Mapping for the Group member attribute (usually memberUID)
+# The value of this attribute must match the value of NFSv4_acctname_attr
+#NFSv4_member_attr = memberUID
\ No newline at end of file
diff --git a/roles/ceph-nfs/templates/systemd-run.j2 b/roles/ceph-nfs/templates/systemd-run.j2
new file mode 100644 (file)
index 0000000..868cd19
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+T=$1
+N=$2
+
+# start nfs-ganesha
+/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+  -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
+{% endif %}
+--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+-v /var/lib/ceph:/var/lib/ceph:z \
+-v /etc/ceph:/etc/ceph:z \
+-v /var/lib/nfs/ganesha:/var/lib/nfs/ganesha:z \
+-v /etc/ganesha:/etc/ganesha:z \
+-v /var/run/ceph:/var/run/ceph:z \
+-v /var/log/ceph:/var/log/ceph:z \
+-v /var/log/ganesha:/var/log/ganesha:z \
+{% if ceph_nfs_dynamic_exports | bool %}
+--privileged \
+-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
+{% endif -%}
+-v /etc/localtime:/etc/localtime:ro \
+{{ ceph_nfs_docker_extra_env }} \
+--entrypoint=/usr/bin/ganesha.nfsd \
+--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
+{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+-F -L STDOUT "${GANESHA_EPOCH}"
diff --git a/roles/ceph-validate/tasks/check_nfs.yml b/roles/ceph-validate/tasks/check_nfs.yml
new file mode 100644 (file)
index 0000000..2c26aa4
--- /dev/null
@@ -0,0 +1,15 @@
+---
+- name: Fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone)
+  ansible.builtin.fail:
+    msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True"
+  when:
+    - nfs_obj_gw | bool
+    - groups.get(mon_group_name, []) | length == 0
+    - (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined)
+
+- name: Fail on openSUSE Leap 15.x using distro packages
+  ansible.builtin.fail:
+    msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
+  when:
+    - ceph_origin == 'distro'
+    - ansible_facts['distribution'] == 'openSUSE Leap'
index a050788a639c762b5ebf7c1bc08b1a3c73528d04..885ffb36ae2e182af568f57e1f5400cebe7c9155 100644 (file)
     - inventory_hostname in groups.get(rgw_group_name, [])
     - rgw_create_pools is defined
 
+- name: Include check_nfs.yml
+  ansible.builtin.include_tasks: check_nfs.yml
+  when: inventory_hostname in groups.get(nfs_group_name, [])
+
 - name: Include check_rbdmirror.yml
   ansible.builtin.include_tasks: check_rbdmirror.yml
   when:
index 9facb5a5fc969763d43f70c0de8b99c36d88a032..298709dddc692097fa82089f2f304541e1712a47 100644 (file)
@@ -15,6 +15,7 @@
   - osds
   - mdss
   - rgws
+  - nfss
   - rbdmirrors
   - clients
   - mgrs
             status: "Complete"
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- hosts: nfss
+  become: True
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph nfs install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_nfs:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-config
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-nfs
+
+    # post-tasks for following imports -
+    - name: set ceph nfs install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_nfs:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
 - hosts: rbdmirrors
   become: True
   gather_facts: false
index 046eb64b7d7610a7d28d74177eb88003e00a9750..8811d3cfc52f609a88df2c6c9762783b0c631213 100644 (file)
@@ -16,6 +16,7 @@
   - osds
   - mdss
   - rgws
+  - nfss
   - rbdmirrors
   - clients
   - mgrs
             status: "Complete"
             end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- hosts: nfss
+  gather_facts: false
+  become: True
+  any_errors_fatal: true
+  pre_tasks:
+    - name: set ceph nfs install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_nfs:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+  tasks:
+    - import_role:
+        name: ceph-defaults
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-config
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-nfs
+
+  post_tasks:
+    - name: set ceph nfs install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_nfs:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
 - hosts: rbdmirrors
   gather_facts: false
   become: True
index 5c83246e5b8a93b07cb4cfb72c43ebc1aba922f3..69de7ac69df7497d645fac9ae1ec13f902eb400f 100644 (file)
@@ -167,11 +167,11 @@ def node(host, request):
     if request.node.get_closest_marker('rbdmirror_secondary') and not ceph_rbd_mirror_remote_user:  # noqa E501
         pytest.skip('Not a valid test for a non-secondary rbd-mirror node')
 
-    if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['clients'], ['monitoring']]:
-        pytest.skip('Not a valid test for client nodes')
+    if request.node.get_closest_marker('ceph_crash') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]:
+        pytest.skip('Not a valid test for nfs or client nodes')
 
-    if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['clients'], ['monitoring']]:
-        pytest.skip('Not a valid test for client nodes')
+    if request.node.get_closest_marker('ceph_exporter') and sanitized_group_names in [['nfss'], ['clients'], ['monitoring']]:
+        pytest.skip('Not a valid test for nfs or client nodes')
 
     if request.node.get_closest_marker("no_docker") and docker:
         pytest.skip(
@@ -220,6 +220,8 @@ def pytest_collection_modifyitems(session, config, items):
             item.add_marker(pytest.mark.rbdmirrors)
         elif "rgw" in test_path:
             item.add_marker(pytest.mark.rgws)
+        elif "nfs" in test_path:
+            item.add_marker(pytest.mark.nfss)
         elif "grafana" in test_path:
             item.add_marker(pytest.mark.grafanas)
         else:
index d8a0037adf660b1a832627dc975a29a8bbc06142..3b0f83df62e4b60ee85e0b1f17e967b804997a3e 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 1
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index bfec446d846a40c210670c5e3b37a28904137389..ba9c2178495173fe6db6e4264eb1d00128016451 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 1
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index fe4092bdbfddba464b40f5dec88f927cd10adb5b..7c5bfe8d056f5efebd2187e80762b3080356edde 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 4b47784b26b2e25fcfb95fd269aedea79acf6048..5c0cf696bae485537cf289715a7a1b9fab06ba60 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 813a01e1a61f67e14534ca4df5596816170dae46..9f97f3c78157bfde3a03e3184f07e3602f02c74d 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 2
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index b71a30375178070c4ff457918c987f95d63de0b1..d9b2582153da03ce18a179ff3e9dac525f00a6a5 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 2
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index abc9bcc4dbd2bbcacade49f348e7b1c232356f59..8321f852e3fd3f743545096e287a8a3b4f125442 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 2
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index a730470a4a7e6ac427e4c889617a4cee9b20b938..0a9b76f8ecb1b809e0f447f56f3d378902355b8d 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 2
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index cbf4fa5ded40112b07f8ae8739caee8316423b41..4d6507affdf295d46aa62afb4e4975a20837f57c 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 0
index 209d36135c6cd3ad10b27be4d0f6f2a6fb6ee51d..8866b1a5da3ed506618d2cbdbaebe6df89b1dee0 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 0
index f9aa5172e0796794f1d0b172772ae2fef517156d..6592d0c0344fb9a790e617c35e4aca5d7c2fb38a 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 4738fb700d55431c0015509701d0b38e0162eae8..8f4ce5460522cd39e974b56b1e88e8978a876682 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index d6c0193f6264f731c67ead5b730aa23fcb5f730a..ee312ec6f3fbe06ccfaf9e36d20ba5bfdf88c2c6 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 0
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 45c4f0fd2d5b0239e49abcc99d61a11271bdbe1a..9f8c4474943a745c9f6ed42c0cc639838688b403 100644 (file)
@@ -4,6 +4,7 @@ mon_vms: 0
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index ec6d970a451521478dd76aedb12c433eb715f4d7..51d488ccbd560acf69e2075076a1bb8428f61656 100644 (file)
@@ -19,6 +19,9 @@ mds2
 [rgws]
 rgw0
 
+#[nfss]
+#nfs0
+
 [clients]
 client0
 client1
index aeb6859f131574d57a0db7c143464bd00d773f01..8e08daa9e12b928e024e13354087602efbbf5be5 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 2
diff --git a/tests/functional/all_daemons/group_vars/nfss b/tests/functional/all_daemons/group_vars/nfss
new file mode 100644 (file)
index 0000000..826bdfe
--- /dev/null
@@ -0,0 +1,10 @@
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+    CACHEINODE {
+            Entries_HWMark = 100000;
+    }
+nfs_ganesha_stable: false
+nfs_ganesha_dev: true
+nfs_ganesha_flavor: "ceph_main"
index ff4b6f5610418debb03503f625c01e43d3c39ccc..8e2019776a5f66c360e321a336be8da695c9415e 100644 (file)
@@ -23,6 +23,9 @@ rgw0
 client0
 client1
 
+#[nfss]
+#nfs0
+
 [rbdmirrors]
 rbd-mirror0
 
index e32be0b54d7dcfc623af18f3693a4b54a5ee5e18..48653bbbb94c85c4cae4d5c7a9eabce093148ca3 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 2
index ec6d970a451521478dd76aedb12c433eb715f4d7..51d488ccbd560acf69e2075076a1bb8428f61656 100644 (file)
@@ -19,6 +19,9 @@ mds2
 [rgws]
 rgw0
 
+#[nfss]
+#nfs0
+
 [clients]
 client0
 client1
index 1a67bd064ee959e01c24b7f808a96f5e814921aa..de2cccc3bb4d9156c6108ad6aa106b612cb8ba2e 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 2
diff --git a/tests/functional/all_daemons_ipv6/group_vars/nfss b/tests/functional/all_daemons_ipv6/group_vars/nfss
new file mode 100644 (file)
index 0000000..fc280e2
--- /dev/null
@@ -0,0 +1,10 @@
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+    CACHEINODE {
+            Entries_HWMark = 100000;
+    }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_main"
index ff4b6f5610418debb03503f625c01e43d3c39ccc..8e2019776a5f66c360e321a336be8da695c9415e 100644 (file)
@@ -23,6 +23,9 @@ rgw0
 client0
 client1
 
+#[nfss]
+#nfs0
+
 [rbdmirrors]
 rbd-mirror0
 
index 5ef0dc8bcc07c82b2d9469f0483cc310ca02b389..b512776aff80b1c7a41957c182c75bffce5a34c5 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 2
index 39e1133e9b58c304dd86f61b7915036b1ba1ecf7..28a105b30c09ad12cc5bd5675e72b8a641f900e4 100644 (file)
@@ -18,6 +18,9 @@ mds0
 [rgws]
 rgw0
 
+[nfss]
+nfs0
+
 [rbdmirrors]
 rbd-mirror0
 
index 19cfd396b2bb8678334befa832918959d237a8ed..433e3196570fc1f90e9143b9cb6c2af25ca0a672 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 2
 mds_vms: 1
 rgw_vms: 1
+nfs_vms: 1
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 0
index dad06a4c8fd7423dd6cb2ddab7dc4e8b0b33d125..a699db8335e00bb7cd03ff0318ed1a0525e4f5ce 100644 (file)
@@ -19,5 +19,9 @@ mds0
 rgw0
 mds0
 
+#[nfss]
+#rgw0
+#mds0
+
 [monitoring]
-mon0
+mon0
\ No newline at end of file
index 6539bc80754c871a80b1c9d3ea7c08828db76520..fb5059e96a2769f16f0ff177e2cba289c5ee1bf4 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 2
 mds_vms: 1
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 2aaf1d30011b9d4223f281d00720c50be3f2df4f..95b228ef879bf4027d53c4332c6e566fd4e43033 100644 (file)
@@ -20,5 +20,9 @@ mds0
 rgw0
 mds0
 
+#[nfss]
+#rgw0
+#mds0
+
 [monitoring]
-mon0
+mon0
\ No newline at end of file
index a68c8359e57cb44cedd0c8dd9a527dd5b8859f39..a53450e6fe0cbf44d2d8dde1f867648fe94e5343 100644 (file)
@@ -4,6 +4,7 @@ mon_vms: 3
 osd_vms: 2
 mds_vms: 1
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index af9682f89ab65215ede2bb61dec13e4a8b7fcd0b..fa85231c2cf6bf7a782dbf6eed841246ec1647ae 100644 (file)
                 regexp: "ceph_repository:.*"
                 replace: "ceph_repository: dev"
                 dest: "{{ group_vars_path }}/all"
+
+            - block:
+                - name: ensure nfs_ganesha_stable is set to False
+                  replace:
+                    regexp: "nfs_ganesha_stable:.*"
+                    replace: "nfs_ganesha_stable: false"
+                    dest: "{{ group_vars_path }}/nfss"
+
+                - name: ensure nfs_ganesha_dev is set to True
+                  replace:
+                    regexp: "nfs_ganesha_dev:.*"
+                    replace: "nfs_ganesha_dev: true"
+                    dest: "{{ group_vars_path }}/nfss"
+              when: "'all_daemons' in group_vars_path.split('/')"
           when: change_dir is defined
 
         - name: print contents of {{ group_vars_path }}/all
index f7e461a5ba24cf38a310c6336bec64d6476b6d9d..921077308a1a4ebac1b7d661e3ca187533e1a02b 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 15af9e6a9e9a5a1939fc92415cecef392ef0b5b3..4892f0d089456c29a923437928d2b3b88c1b45df 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 0
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 2
index 39e60ce64e17529a5cac10fb60eb0ba1af7638c6..827dcc928d6d67d27616e6d49ae9a96ba8c8ccfe 100644 (file)
@@ -4,6 +4,7 @@ mon_vms: 3
 osd_vms: 0
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 2
index f3f4204ecc04a1952f44ed8dc1729ecd91d81ebb..85e074d79058421ae3ca43af28b7b295dc6fdfa2 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 0
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 708876b30effc37e23292211d5510aaf977fd4b9..516e14c0a0665ffe465487b3c10344bf9117873e 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 83226a731b168fdb900e498b877822403f5f5624..5bdaadfa01b052282f5e2a615f9a1fddad6c0f1c 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 52cb2c983631b3f1f4c6ae8bd00819390e45a48a..f53ba8280d1231766699fb76ccb6a814176727ad 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 83226a731b168fdb900e498b877822403f5f5624..5bdaadfa01b052282f5e2a615f9a1fddad6c0f1c 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 59d880159e86bf3bc5153cd1eca0cf764db1c02d..d4418d89bdb027e3bd4e9e4003b17a112ef981bc 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 4
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 19ef7be88af67b2788b7963147772f789b68ac24..b5d3089f28e5392df4731f7ab5efb8eeb9201906 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 4
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index b6b2a483feeb38f6e12c4ab2c678d860b4d68c70..2603e244faee4b38d8038e0651c6fee7d71c9139 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 8015ceaf31d47df690abdf8a0de6b4f7ae576037..dea6a9e55f2644317078965c5c8a34e88d7dc595 100644 (file)
@@ -13,7 +13,10 @@ mds0
 [rgws]
 rgw0
 
-clients]
+#[nfss]
+#nfs0
+
+[clients]
 client0
 client1
 
@@ -24,4 +27,4 @@ rbd-mirror0
 mon0
 
 #[all:vars]
-#ansible_python_interpreter=/usr/bin/python3
+#ansible_python_interpreter=/usr/bin/python3
\ No newline at end of file
index eafecc2b2998dbc4670b460bfeb7369d38dec38e..a4ff599bddfdc11deeedae855489550758b76d74 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 2
 mds_vms: 1
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 2
index a538d5a5be380c0f3a9d58713f493460f10d1a07..2b0423c3b1879c31f0d2fc89967ac9cafdcd9460 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 123f03af32fa70180a000e5624e0f6ef0a95ee43..eee5c310e3d6d14ab8eeb5306473c815c626eedb 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 3fdfefe423fe777c22fa671dd4be24f2642565e3..2b8351686eebe3c388bf88390f6c19e77a94950d 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index ce204efc5972cc678a190e81a7af904af51841c8..105cad593c09b2075c759195c522b7c9d9719c42 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index e076d17110d5b2c97b0afb0bdad0c75314078eef..4bcf2c529c6fee4c442839aa732ef9975ef0cfc5 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 1
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 98068c12e7b0d3d4bc5c9c3a401a4dac0079404f..e0a7cf1a88021ae9d86deb3774fb3eef939d3414 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 1
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index c4a6f37bff50d67471a90b7d618e63dc154955ab..a26e726692a83c1683b5eab923ea7ce9633e3119 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 13acb07008b90e71e2951e86a6b828d9eb198808..90c50d27f0e0af3e2b28d7a3cb46780d13cbfbdd 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 890fb0092843c475d458fb4cbec58359f34043c6..d63a95a8dc775aca23c89dc02d756798e919a8ad 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 3b98de5be0cb9f0e9868022d77430a45bf46b7a8..bf0038ad0b62b862f33ee36eb195ea9f45e5547d 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 30f35ecd6716a3d66ea188a92f805414f00cf2a8..b69ecb35e3afdaddf141f36e92182d3dc4abac1b 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 2
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index e3be118f199f5a5895340d0eac112eff8ec4a4c8..7d7da7a99f6184c65859e9421d4881f6ac25c7ad 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 2
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index c91eb2731d5eab177ab7b175ed3677067e105810..2f16c370f838174e8aed076cf24fa73080650221 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 0
index 409c700eccf259688d182c3abbf6ff1545a2870e..85f92045c65871c0037e3b2fdb4654283b1627fa 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 1
 client_vms: 0
index 4927acc692802b8d38cc814f06f3236ef1efd894..b701d05b490a2a088a3ddc3a42e86462c888123c 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 2a1a6fa17fbc9a70f3cb0009f261179181b20e46..41fb2f34624af042ad6cf921fb342104e3fcb06a 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 1
 osd_vms: 1
 mds_vms: 0
 rgw_vms: 1
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
index 535158bef0a7bd5f4f276682de1b1536947f793a..3419fc4e15909e8aeae2124d1cd4cbe3acdb2531 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 2
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
diff --git a/tests/functional/subset_update/group_vars/nfss b/tests/functional/subset_update/group_vars/nfss
new file mode 100644 (file)
index 0000000..fc280e2
--- /dev/null
@@ -0,0 +1,10 @@
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+    CACHEINODE {
+            Entries_HWMark = 100000;
+    }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_main"
index a1073f23a1ebc2987d0eb26cfcf3b674bfeb6810..c9105ddf37a2ff5146e982d384f350b44459e2e5 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 2
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0
diff --git a/tests/functional/tests/nfs/test_nfs_ganesha.py b/tests/functional/tests/nfs/test_nfs_ganesha.py
new file mode 100644 (file)
index 0000000..fda75ad
--- /dev/null
@@ -0,0 +1,49 @@
+import json
+import pytest
+
+
+class TestNFSs(object):
+
+    @pytest.mark.no_docker
+    @pytest.mark.parametrize('pkg', [
+        'nfs-ganesha',
+        'nfs-ganesha-rgw'
+    ])
+    def test_nfs_ganesha_package_is_installed(self, node, host, pkg):
+        assert host.package(pkg).is_installed
+
+    @pytest.mark.no_docker
+    def test_nfs_service_enabled_and_running(self, node, host):
+        s = host.service("nfs-ganesha")
+        assert s.is_enabled
+        assert s.is_running
+
+    @pytest.mark.no_docker
+    def test_nfs_config_override(self, node, host):
+        assert host.file(
+            "/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
+
+    def test_nfs_is_up(self, node, setup, ceph_status):
+        hostname = node["vars"]["inventory_hostname"]
+        cluster = setup["cluster_name"]
+        name = f"client.rgw.{hostname}"
+        output = ceph_status(f'/var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring', name=name)
+        keys = list(json.loads(
+            output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys())
+        keys.remove('summary')
+        daemons = json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"]
+        hostnames = []
+        for key in keys:
+            hostnames.append(daemons[key]['metadata']['hostname'])
+        assert hostname in hostnames
+
+
+# NOTE (guits): This check must be fixed. (Permission denied error)
+#    @pytest.mark.no_docker
+#    def test_nfs_rgw_fsal_export(self, node, host):
+#        if(host.mount_point("/mnt").exists):
+#            cmd = host.run("sudo umount /mnt")
+#            assert cmd.rc == 0
+#        cmd = host.run("sudo mount.nfs localhost:/ceph /mnt/")
+#        assert cmd.rc == 0
+#        assert host.mount_point("/mnt").exists
index e3ac911f4afb85258c27fc04872ef86d5ed5098c..d4c15634be14085d25a241a1e02c075bb43c223d 100644 (file)
@@ -11,6 +11,7 @@ markers =
   mdss: for mds nodes
   mgrs: for mgr nodes
   mons: for mon nodes
+  nfss: for nfs nodes
   osds: for osd nodes
   rbdmirrors: for rbdmirror nodes
   rgws: for rgw nodes
index 49ee8fc4682188bbcd59a2ee31a6fa492beb1b25..376f3a58258899afbfeca7143df9a6bf4654ccbb 100644 (file)
@@ -8,6 +8,7 @@ mon_vms: 3
 osd_vms: 3
 mds_vms: 0
 rgw_vms: 0
+nfs_vms: 0
 grafana_server_vms: 0
 rbd_mirror_vms: 0
 client_vms: 0