git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa/cephfs: introduce nfs-ganesha tests
author    Venky Shankar <vshankar@redhat.com>
          Fri, 1 Aug 2025 10:39:46 +0000 (10:39 +0000)
committer Venky Shankar <vshankar@redhat.com>
          Thu, 23 Oct 2025 04:14:54 +0000 (04:14 +0000)
Fixes: http://tracker.ceph.com/issues/73172
Signed-off-by: Venky Shankar <vshankar@redhat.com>
41 files changed:
qa/suites/fs/nfs-ganesha/% [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/.qa [new symlink]
qa/suites/fs/nfs-ganesha/begin/+ [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/begin/.qa [new symlink]
qa/suites/fs/nfs-ganesha/begin/0-install.yaml [new symlink]
qa/suites/fs/nfs-ganesha/begin/2-logrotate.yaml [new symlink]
qa/suites/fs/nfs-ganesha/centos_9.stream.yaml [new symlink]
qa/suites/fs/nfs-ganesha/clusters/.qa [new symlink]
qa/suites/fs/nfs-ganesha/clusters/1a3s-mds-1c.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/conf [new symlink]
qa/suites/fs/nfs-ganesha/overrides/+ [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/overrides/.qa [new symlink]
qa/suites/fs/nfs-ganesha/overrides/ignorelist_health.yaml [new symlink]
qa/suites/fs/nfs-ganesha/overrides/pg_health.yaml [new symlink]
qa/suites/fs/nfs-ganesha/tasks/% [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/.qa [new symlink]
qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/% [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/.qa [new symlink]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/.qa [new symlink]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/no.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/yes.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/delegations/no.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/delegations/rw.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/ganesha.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/.qa [new symlink]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/no.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/yes.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/2-mount/% [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/2-mount/.qa [new symlink]
qa/suites/fs/nfs-ganesha/tasks/2-mount/ganesha-client.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/.qa [new symlink]
qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/4.1.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/4.2.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/latest.yaml [new file with mode: 0644]
qa/suites/fs/nfs-ganesha/tasks/3-workload/iogen.yaml [new file with mode: 0644]
qa/tasks/ganesha_client.py [new file with mode: 0644]
qa/tasks/ganesha_reconf.py [new file with mode: 0644]
src/pybind/mgr/nfs/export.py
src/pybind/mgr/nfs/ganesha_conf.py
src/pybind/mgr/nfs/module.py
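
For orientation, the suite fragments above form a teuthology facet matrix: each job picks one option per facet (async, delegations, zerocopy, nfs-version) on top of a single cluster and distro fragment. A rough sketch of the resulting job count, assuming the run is not subset at scheduling time (facet names are taken from the listing; actual scheduling is up to teuthology-suite):

    # Rough sketch of the facet matrix spanned by this suite; real runs may
    # be subset or pruned by teuthology-suite.
    from itertools import product

    facets = {
        'async': ['no', 'yes'],
        'delegations': ['no', 'rw'],
        'zerocopy': ['no', 'yes'],
        'nfs-version': ['4.1', '4.2', 'latest'],
    }
    jobs = list(product(*facets.values()))
    print(len(jobs))  # 24 combinations per cluster/distro fragment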

diff --git a/qa/suites/fs/nfs-ganesha/% b/qa/suites/fs/nfs-ganesha/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/fs/nfs-ganesha/.qa b/qa/suites/fs/nfs-ganesha/.qa
new file mode 120000 (symlink)
index 0000000..fea2489
--- /dev/null
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/begin/+ b/qa/suites/fs/nfs-ganesha/begin/+
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/fs/nfs-ganesha/begin/.qa b/qa/suites/fs/nfs-ganesha/begin/.qa
new file mode 120000 (symlink)
index 0000000..fea2489
--- /dev/null
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/begin/0-install.yaml b/qa/suites/fs/nfs-ganesha/begin/0-install.yaml
new file mode 120000 (symlink)
index 0000000..3b18529
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/begin/0-install.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/begin/2-logrotate.yaml b/qa/suites/fs/nfs-ganesha/begin/2-logrotate.yaml
new file mode 120000 (symlink)
index 0000000..9d6e7ba
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/begin/2-logrotate.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/centos_9.stream.yaml b/qa/suites/fs/nfs-ganesha/centos_9.stream.yaml
new file mode 120000 (symlink)
index 0000000..dca92dd
--- /dev/null
@@ -0,0 +1 @@
+.qa/distros/podman/centos_9.stream.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/clusters/.qa b/qa/suites/fs/nfs-ganesha/clusters/.qa
new file mode 120000 (symlink)
index 0000000..fea2489
--- /dev/null
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/clusters/1a3s-mds-1c.yaml b/qa/suites/fs/nfs-ganesha/clusters/1a3s-mds-1c.yaml
new file mode 100644 (file)
index 0000000..ff9e3c9
--- /dev/null
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0]
+- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 20 # GB
+- machine:
+    disk: 200 # GB
diff --git a/qa/suites/fs/nfs-ganesha/conf b/qa/suites/fs/nfs-ganesha/conf
new file mode 120000 (symlink)
index 0000000..16e8cc4
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/conf
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/overrides/+ b/qa/suites/fs/nfs-ganesha/overrides/+
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/fs/nfs-ganesha/overrides/.qa b/qa/suites/fs/nfs-ganesha/overrides/.qa
new file mode 120000 (symlink)
index 0000000..fea2489
--- /dev/null
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/overrides/ignorelist_health.yaml b/qa/suites/fs/nfs-ganesha/overrides/ignorelist_health.yaml
new file mode 120000 (symlink)
index 0000000..5cb891a
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/overrides/pg_health.yaml b/qa/suites/fs/nfs-ganesha/overrides/pg_health.yaml
new file mode 120000 (symlink)
index 0000000..5b6be3a
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/overrides/pg_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/% b/qa/suites/fs/nfs-ganesha/tasks/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/fs/nfs-ganesha/tasks/.qa b/qa/suites/fs/nfs-ganesha/tasks/.qa
new file mode 120000 (symlink)
index 0000000..fea2489
--- /dev/null
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml b/qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml
new file mode 100644 (file)
index 0000000..0a059d6
--- /dev/null
@@ -0,0 +1,64 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
+tasks:
+- cephadm:
+    roleless: false
+- cephadm.shell:
+    mon.a:
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
+- cephadm.shell:
+    mon.a:
+      - cmd: ceph nfs cluster create nfs-ganesha-test
+      - cmd: ceph nfs export apply nfs-ganesha-test -i /dev/stdin
+        stdin: |
+          {
+              "export": {
+                  "export_id": 1,
+                  "path": "/",
+                  "cluster_id": "nfs-ganesha-test",
+                  "pseudo": "/nfsganesha",
+                  "access_type": "RW",
+                  "squash": "none",
+                  "security_label": true,
+                  "protocols": [
+                      4
+                  ],
+                  "transports": [
+                      "TCP"
+                  ],
+                  "fsal": {
+                      "name": "CEPH",
+                      "user_id": "nfs.nfs-ganesha-test.cephfs.a4cd9f65",
+                      "fs_name": "cephfs",
+                      "cmount_path": "/"
+                  },
+                  "clients": []
+              },
+              "log": {
+                  "default_log_level": "WARN",
+                  "components": {
+                      "fsal": "debug",
+                      "nfs4": "debug"
+                  },
+                  "facility": {
+                      "name": "file",
+                      "destination": "/var/log/ceph/ganesha.log",
+                      "enable": "active"
+                  }
+              }
+          }
+      # for debug
+      - cmd: ceph nfs export info nfs-ganesha-test --pseudo_path=/nfsganesha
+      # for debug
+      - cmd: ceph orch ls --service-name nfs.nfs-ganesha-test --export
+      # sleep a bit
+      - cmd: sleep 60
+      # more debug
+      - cmd: ceph orch ps
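
As a hedged illustration of what the debug 'ceph nfs export info' call above is expected to return once the export is applied (field values mirror the JSON piped in via stdin; running this outside teuthology assumes an admin keyring and 'ceph' on PATH):

    # Sketch: query the export just applied and sanity-check a few fields.
    import json
    import subprocess

    out = subprocess.check_output(
        ['ceph', 'nfs', 'export', 'info', 'nfs-ganesha-test',
         '--pseudo_path=/nfsganesha'])
    export = json.loads(out)
    assert export['pseudo'] == '/nfsganesha'
    assert export['access_type'] == 'RW'
    assert export['fsal']['name'] == 'CEPH'
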
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/% b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/.qa b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/.qa b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/no.yaml b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/no.yaml
new file mode 100644 (file)
index 0000000..d45f84a
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-reconf:
+      async: no
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/yes.yaml b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/async/yes.yaml
new file mode 100644 (file)
index 0000000..12a9518
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-reconf:
+      async: true
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/delegations/no.yaml b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/delegations/no.yaml
new file mode 100644 (file)
index 0000000..3204f7d
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-reconf:
+      delegations: none
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/delegations/rw.yaml b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/delegations/rw.yaml
new file mode 100644 (file)
index 0000000..9b4ccc6
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-reconf:
+      delegations: rw
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/ganesha.yaml b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/ganesha.yaml
new file mode 100644 (file)
index 0000000..5a1b69b
--- /dev/null
@@ -0,0 +1,11 @@
+tasks:
+- ganesha-reconf:
+    cluster_id: 'nfs-ganesha-test'
+    pseudo_path: '/nfsganesha'
+
+- cephadm.shell:
+    mon.a:
+      # sleep a bit
+      - cmd: sleep 30
+      # more debug
+      - cmd: ceph orch ps
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/.qa b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/no.yaml b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/no.yaml
new file mode 100644 (file)
index 0000000..9bf12e5
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-reconf:
+      zerocopy: no
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/yes.yaml b/qa/suites/fs/nfs-ganesha/tasks/1-apply-config/zerocopy/yes.yaml
new file mode 100644 (file)
index 0000000..4f6ede0
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-reconf:
+      zerocopy: true
\ No newline at end of file
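
The async, delegations and zerocopy fragments above only set 'overrides: ganesha-reconf: ...'; they reach the task defined in ganesha.yaml through teuthology's override merging. A minimal sketch under that assumption, using the same deep_merge helper the task itself calls (the override values shown are one possible matrix pick):

    # Minimal sketch of how facet overrides land in the ganesha-reconf task
    # config; for dict arguments deep_merge updates its first argument.
    from teuthology.misc import deep_merge

    task_config = {'cluster_id': 'nfs-ganesha-test', 'pseudo_path': '/nfsganesha'}
    overrides = {'async': True, 'delegations': 'rw', 'zerocopy': False}
    deep_merge(task_config, overrides)
    print(task_config)  # now carries the knobs GaneshaReconf.begin() reads
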
diff --git a/qa/suites/fs/nfs-ganesha/tasks/2-mount/% b/qa/suites/fs/nfs-ganesha/tasks/2-mount/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/fs/nfs-ganesha/tasks/2-mount/.qa b/qa/suites/fs/nfs-ganesha/tasks/2-mount/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/2-mount/ganesha-client.yaml b/qa/suites/fs/nfs-ganesha/tasks/2-mount/ganesha-client.yaml
new file mode 100644 (file)
index 0000000..2839d86
--- /dev/null
@@ -0,0 +1,5 @@
+tasks:
+- ganesha-client:
+    client.0:
+      cluster_id: nfs-ganesha-test
+      pseudo_path: /nfsganesha
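
For reference, a hedged sketch of how the mount target gets resolved, mirroring what the ganesha_client task added later in this commit does with the 'ceph nfs cluster info' output (the 'backend' field layout is assumed from that task):

    # Sketch: resolve the NFS server ip/port for the mount from
    # 'ceph nfs cluster info' JSON, as the ganesha_client task does.
    import json
    import subprocess

    cluster_id = 'nfs-ganesha-test'
    out = subprocess.check_output(['ceph', 'nfs', 'cluster', 'info', cluster_id])
    backend = json.loads(out)[cluster_id]['backend'][0]
    print(f"sudo mount -t nfs -o port={backend['port']},vers=4.2 "
          f"{backend['ip']}:/nfsganesha /mnt/nfsganesha")
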
diff --git a/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/.qa b/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/4.1.yaml b/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/4.1.yaml
new file mode 100644 (file)
index 0000000..5aae179
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-client:
+      version: 4.1
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/4.2.yaml b/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/4.2.yaml
new file mode 100644 (file)
index 0000000..56e48f5
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-client:
+      version: 4.2
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/latest.yaml b/qa/suites/fs/nfs-ganesha/tasks/2-mount/nfs-version/latest.yaml
new file mode 100644 (file)
index 0000000..de4a002
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  ganesha-client:
+      version: latest
\ No newline at end of file
diff --git a/qa/suites/fs/nfs-ganesha/tasks/3-workload/iogen.yaml b/qa/suites/fs/nfs-ganesha/tasks/3-workload/iogen.yaml
new file mode 100644 (file)
index 0000000..98d3b52
--- /dev/null
@@ -0,0 +1,5 @@
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - suites/iogen.sh
\ No newline at end of file
diff --git a/qa/tasks/ganesha_client.py b/qa/tasks/ganesha_client.py
new file mode 100644 (file)
index 0000000..e30329e
--- /dev/null
@@ -0,0 +1,108 @@
+"""
+mount a ganesha client
+"""
+
+import os
+import json
+import logging
+from io import StringIO
+
+from teuthology.misc import deep_merge
+from teuthology.task import Task
+from teuthology import misc
+
+log = logging.getLogger(__name__)
+
+class GaneshaClient(Task):
+    def __init__(self, ctx, config):
+        super(GaneshaClient, self).__init__(ctx, config)
+        self.log = log
+        self.mounts = {}
+
+    def setup(self):
+        super(GaneshaClient, self).setup()
+
+    def begin(self):
+        super(GaneshaClient, self).begin()
+        log.info('mounting ganesha client(s)')
+
+        if self.config is None:
+            ids = misc.all_roles_of_type(self.ctx.cluster, 'client')
+            client_roles = [f'client.{id_}' for id_ in ids]
+            self.config = dict([r, dict()] for r in client_roles)
+        elif isinstance(self.config, list):
+            client_roles = self.config
+            self.config = dict([r, dict()] for r in client_roles)
+        elif isinstance(self.config, dict):
+            client_roles = filter(lambda x: 'client.' in x, self.config.keys())
+        else:
+            raise ValueError(f"Invalid config object: {self.config} ({self.config.__class__})")
+        log.info(f"config is {self.config}")
+
+        mounts = {}
+        overrides = self.ctx.config.get('overrides', {}).get('ganesha-client', {})
+        top_overrides = dict(filter(lambda x: 'client.' not in x[0], overrides.items()))
+
+        clients = list(misc.get_clients(ctx=self.ctx, roles=client_roles))
+        test_dir = misc.get_testdir(self.ctx)
+
+        for id_, remote in clients:
+            entity = f'client.{id_}'
+            client_config = self.config.get(entity)
+            if client_config is None:
+                client_config = {}
+            # top level overrides
+            deep_merge(client_config, top_overrides)
+            # mount specific overrides
+            client_config_overrides = overrides.get(entity)
+            deep_merge(client_config, client_config_overrides)
+            log.info(f"{entity} config is {client_config}")
+
+            cluster_id = client_config['cluster_id']
+            pseudo_path = client_config['pseudo_path']
+            nfs_version = client_config.get('version', 'latest')
+
+            try:
+                first_mon = misc.get_first_mon(self.ctx, None)
+                (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
+
+                proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
+                                       stdout=StringIO(), wait=True)
+                res = proc.stdout.getvalue()
+                export_json = json.loads(res)
+                log.debug(f'export_json: {export_json}')
+
+                proc = mon0_remote.run(args=['ceph', 'nfs', 'cluster', 'info', cluster_id],
+                                       stdout=StringIO(), wait=True)
+                res = proc.stdout.getvalue()
+                cluster_info = json.loads(res)
+                log.debug(f'cluster_info: {cluster_info}')
+
+                info_output = cluster_info[cluster_id]['backend'][0]
+                port = info_output['port']
+                ip = info_output['ip']
+
+                mntpt = os.path.join(test_dir, f'mnt.{id_}')
+                remote.run(args=['mkdir', '-p', mntpt], timeout=60)
+                if nfs_version == 'latest':
+                    remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
+                                     f'port={port}', f'{ip}:{pseudo_path}', mntpt])
+                else:
+                    remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
+                                     f'port={port},vers={nfs_version}', f'{ip}:{pseudo_path}', mntpt])
+                remote.run(args=['sudo', 'chmod', '1777', mntpt], timeout=60)
+                remote.run(args=['stat', mntpt])
+                mounts[id_] = (remote, mntpt)
+            except Exception as e:
+                log.error(f'failed: {e}')
+        self.mounts = mounts
+
+    def end(self):
+        super(GaneshaClient, self).end()
+        log.debug('unmounting ganesha client(s)')
+        for (remote, mntpt) in self.mounts.values():
+            log.debug(f'unmounting {mntpt}')
+            remote.run(args=['sudo', 'umount', mntpt])
+        self.mounts = {}
+
+task = GaneshaClient
diff --git a/qa/tasks/ganesha_reconf.py b/qa/tasks/ganesha_reconf.py
new file mode 100644 (file)
index 0000000..452c02a
--- /dev/null
@@ -0,0 +1,77 @@
+"""
+reconfigure a ganesha server
+"""
+
+import json
+import logging
+from io import StringIO
+
+from teuthology.misc import deep_merge
+from teuthology.task import Task
+from teuthology import misc
+
+log = logging.getLogger(__name__)
+
+class GaneshaReconf(Task):
+    def __init__(self, ctx, config):
+        super(GaneshaReconf, self).__init__(ctx, config)
+        self.log = log
+
+    def setup(self):
+        super(GaneshaReconf, self).setup()
+
+    def begin(self):
+        super(GaneshaReconf, self).begin()
+        log.info('reconfiguring ganesha server')
+
+        ganesha_config = self.config
+        log.info(f'ganesha_config is {ganesha_config}')
+        overrides = self.ctx.config.get('overrides', {}).get('ganesha-reconf', {})
+        log.info(f'overrides is {overrides}')
+
+        deep_merge(ganesha_config, overrides)
+        log.info(f'ganesha_config is {ganesha_config}')
+
+        try:
+            first_mon = misc.get_first_mon(self.ctx, None)
+            (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
+
+            cluster_id = ganesha_config['cluster_id']
+            pseudo_path = ganesha_config['pseudo_path']
+
+            proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
+                                   stdout=StringIO(), wait=True)
+            res = proc.stdout.getvalue()
+            export_json = json.loads(res)
+            log.debug(f'export_json: {export_json}')
+
+            ceph_section = {'async': False, 'zerocopy': False}
+            is_async = ganesha_config.get('async', False)
+            if is_async:
+                ceph_section["async"] = True
+            is_zerocopy = ganesha_config.get('zerocopy', False)
+            if is_zerocopy:
+                ceph_section["zerocopy"] = True
+
+            nfsv4_block = {}
+            delegations = ganesha_config.get('delegations', 'none')
+            export_json['delegations'] = delegations
+            nfsv4_block['delegations'] = False if delegations == 'none' else True
+
+            new_export = {}
+            if "export" in export_json.keys():
+                new_export = export_json
+            else:
+                new_export["export"] = export_json
+            new_export["ceph"] = ceph_section
+            new_export["nfsv4"] = nfsv4_block
+
+            log.debug(f'new_export is {json.dumps(new_export)}')
+            mon0_remote.run(args=['ceph', 'nfs', 'export', 'apply', cluster_id, "-i", "-"],
+                            stdin=json.dumps(new_export))
+        except Exception as e:
+            log.error(f'failed: {e}')
+
+    def end(self):
+        super(GaneshaReconf, self).end()
+
+task = GaneshaReconf
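
To make the reconfiguration step concrete, a sketch of the JSON shape GaneshaReconf pipes to 'ceph nfs export apply <cluster_id> -i -' (the export portion really comes from 'ceph nfs export info'; the values here are illustrative):

    # Illustrative payload produced by GaneshaReconf.begin(): the export dict
    # from 'export info' wrapped together with the new 'ceph' section.
    import json

    export_info = {
        'export_id': 1,
        'path': '/',
        'pseudo': '/nfsganesha',
        'delegations': 'rw',   # injected by the task from its config
    }
    payload = {'export': export_info,
               'ceph': {'async': True, 'zerocopy': False}}
    print(json.dumps(payload, indent=4))
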
diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py
index 1f0d4b26132f38e71449f4f28d3572b74d0991ff..c1da0fe9b439caa7a6e85ba285f2854e51ce215e 100644 (file)
@@ -29,6 +29,8 @@ from .ganesha_conf import (
     RGWFSAL,
     RawBlock,
     CephBlock,
+    LogBlock,
+    NFSV4Block,
     format_block)
 from .exception import NFSException, NFSInvalidOperation, FSNotFound, NFSObjectNotFound
 from .utils import (
@@ -218,12 +220,16 @@ class AppliedExportResults:
         return self.status
 
 class GaneshaExport:
-    # currently, EXPORT and CEPH block.
+    # EXPORT, CEPH and LOG block.
     def __init__(self,
                  export: Export,
-                 ceph_block: Optional[CephBlock] = None) -> None:
+                 ceph_block: Optional[CephBlock] = None,
+                 log_block: Optional[LogBlock] = None,
+                 nfsv4_block: Optional[NFSV4Block] = None) -> None:
         self.export = export
         self.ceph_block = ceph_block
+        self.log_block = log_block
+        self.nfsv4_block = nfsv4_block
 
     # frequently used properties so that much of the code that now
     # has moved to using this class can still continue to access via
@@ -248,20 +254,32 @@ class GaneshaExport:
     def fsal(self):
         return self.export.fsal
 
+    @property
+    def delegations(self):
+        return self.export.delegations
+
     def to_dict(self, full=False) -> Dict[str, Any]:
         export_dict = self.export.to_dict()
-        if not full or not self.ceph_block:
+        if not full or (not self.ceph_block and not self.log_block
+                        and not self.nfsv4_block):
             return export_dict
-        ge_dict = {
-            'export': export_dict,
-            'ceph': self.ceph_block.to_dict()
-            }
+        ge_dict = {'export': export_dict}
+        if self.ceph_block:
+            ge_dict['ceph'] = self.ceph_block.to_dict()
+        if self.log_block:
+            ge_dict['log'] = self.log_block.to_dict()
+        if self.nfsv4_block:
+            ge_dict['nfsv4'] = self.nfsv4_block.to_dict()
         return ge_dict
 
     def to_export_block(self):
         block_str = format_block(self.export.to_export_block())
         if self.ceph_block:
             block_str += format_block(self.ceph_block.to_ceph_block())
+        if self.log_block:
+            block_str += format_block(self.log_block.to_log_block())
+        if self.nfsv4_block:
+            block_str += format_block(self.nfsv4_block.to_nfsv4_block())
         return block_str
 
     def __eq__(self, other: Any) -> bool:
@@ -379,8 +397,10 @@ class ExportMgr:
                 break
         return nid
 
-    def _has_ceph_block(raw_config_parsed: List) -> bool:
-        return len(raw_config_parsed) > 1
+    def _has_ceph_block(raw_config_parsed: Dict) -> bool:
+        return 'CEPH' in raw_config_parsed.keys()
+    def _has_log_block(raw_config_parsed: Dict) -> bool:
+        return 'LOG' in raw_config_parsed.keys()
 
     def _read_raw_config(self, rados_namespace: str) -> None:
         with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
@@ -396,16 +416,19 @@ class ExportMgr:
                     log.debug(f'raw_config: {raw_config}')
                     raw_config_parsed = GaneshaConfParser(raw_config).parse()
                     log.debug(f'raw_config_parsed: {raw_config_parsed}')
-                    export_block = raw_config_parsed[0]
-                    # do we have a ceph block?
+                    # mandatory export block
+                    export_block = raw_config_parsed['EXPORT']
+                    # do we have a ceph/log block? (optional)
+                    ceph_block = None
+                    log_block = None
                     if _has_ceph_block(raw_config_parsed):
-                        ceph_block = raw_config_parsed[1]
-                        self.export_conf_objs.append(
+                        ceph_block = raw_config_parsed['CEPH']
+                    if _has_log_block(raw_config_parsed):
+                        log_block = raw_config_parsed['LOG']
+                    self.export_conf_objs.append(
                             GaneshaExport(Export.from_export_block(export_block, rados_namespace),
-                                          CephBlock.from_ceph_block(ceph_block)))
-                    else:
-                        self.export_conf_objs.append(
-                            GaneshaExport(Export.from_export_block(export_block, rados_namespace)))
+                                          CephBlock.from_ceph_block(ceph_block),
+                                          LogBlock.from_log_block(log_block)))
 
     def _save_export(self, cluster_id: str, ganesha_export: GaneshaExport) -> None:
         log.debug('in _save_export')
@@ -462,14 +485,18 @@ class ExportMgr:
                 log.debug(f'raw_config: {raw_config}')
                 raw_config_parsed = GaneshaConfParser(raw_config).parse()
                 log.debug(f'raw_config_parsed: {raw_config_parsed}')
-                export_block = raw_config_parsed[0]
-                # do we have a ceph block?
+                export_block = raw_config_parsed['EXPORT']
+                # do we have a ceph/log block? (optional)
+                ceph_block = None
+                log_block = None
                 if _has_ceph_block(raw_config_parsed):
-                    ceph_block = raw_config_parsed[1]
-                    export = GaneshaExport(Export.from_export_block(export_block, cluster_id),
-                                           CephBlock.from_ceph_block(ceph_block))
-                else:
-                    export = GaneshaExport(Export.from_export_block(export_block, cluster_id))
+                    ceph_block = raw_config_parsed['CEPH']
+                if _has_log_block(raw_config_parsed):
+                    log_block = raw_config_parsed['LOG']
+                export = GaneshaExport(
+                    Export.from_export_block(export_block, cluster_id),
+                    CephBlock.from_ceph_block(ceph_block),
+                    LogBlock.from_log_block(log_block))
                 log.debug(f'export: {export}')
                 return export
         except ObjectNotFound:
@@ -661,20 +688,30 @@ class ExportMgr:
 
     def _change_export(self, cluster_id: str, export: Dict,
                        earmark_resolver: Optional[CephFSEarmarkResolver] = None) -> Dict[str, Any]:
-        # if the export json has a ceph section (key), extract it from the export
+        # if the export json has a ceph/log section (key), extract it from the export
         # json to preserve backward compatibility.
         ceph_dict = {}
+        log_dict = {}
+        nfsv4_dict = {}
         if "ceph" in export.keys():
             ceph_dict = export.pop("ceph")
-            if not "export" in export.keys():
-                raise Exception('\'export\' key missing in export json')
+        if "log" in export.keys():
+            log_dict = export.pop("log")
+        if "nfsv4" in export.keys():
+            nfsv4_dict = export.pop("nfsv4")
+        if "export" in export.keys():
             export = export.pop("export")
         msg = f'export_dict: {export}'
-        log.exception(msg)
+        log.debug(msg)
         msg = f'ceph_dict: {ceph_dict}'
-        log.exception(msg)
+        log.debug(msg)
+        msg = f'nfsv4_dict: {nfsv4_dict}'
+        log.debug(msg)
+        msg = f'log_dict: {log_dict}'
+        log.debug(msg)
         try:
-            return self._apply_export(cluster_id, export, earmark_resolver, ceph_dict)
+            return self._apply_export(cluster_id, export, earmark_resolver,
+                                      ceph_dict, log_dict, nfsv4_dict)
         except NotImplementedError as e:
             # in theory, the NotImplementedError here may be raised by a hook back to
             # an orchestration module. If the orchestration module supports it the NFS
@@ -835,7 +872,8 @@ class ExportMgr:
                              clients: list = [],
                              sectype: Optional[List[str]] = None,
                              cmount_path: Optional[str] = "/",
-                             earmark_resolver: Optional[CephFSEarmarkResolver] = None
+                             earmark_resolver: Optional[CephFSEarmarkResolver] = None,
+                             delegations: Optional[str] = "none"
                              ) -> Dict[str, Any]:
 
         validate_cephfs_path(self.mgr, fs_name, path)
@@ -860,6 +898,7 @@ class ExportMgr:
                     },
                     "clients": clients,
                     "sectype": sectype,
+                    "delegations": delegations
                 },
                 earmark_resolver
             )
@@ -926,7 +965,9 @@ class ExportMgr:
             cluster_id: str,
             new_export_dict: Dict,
             earmark_resolver: Optional[CephFSEarmarkResolver] = None,
-            ceph_dict: Optional[Dict] = {}) -> Dict[str, str]:
+            ceph_dict: Optional[Dict] = {},
+            log_dict: Optional[Dict] = {},
+            nfsv4_dict: Optional[Dict] = {}) -> Dict[str, str]:
         for k in ['path', 'pseudo']:
             if k not in new_export_dict:
                 raise NFSInvalidOperation(f'Export missing required field {k}')
@@ -972,9 +1013,17 @@ class ExportMgr:
         log.debug(f'ceph_dict: {ceph_dict}')
         if ceph_dict:
             ceph_block = CephBlock.from_dict(ceph_dict)
+        log_block = None
+        log.debug(f'log_dict: {log_dict}')
+        if log_dict:
+            log_block = LogBlock.from_dict(log_dict)
+        nfsv4_block = None
+        log.debug(f'nfsv4_dict: {nfsv4_dict}')
+        if nfsv4_dict:
+            nfsv4_block = NFSV4Block.from_dict(nfsv4_dict)
 
         # use @ganesha_export in place of @new_export here onwards
-        ganesha_export = GaneshaExport(new_export, ceph_block)
+        ganesha_export = GaneshaExport(new_export, ceph_block, log_block, nfsv4_block)
 
         if not old_export:
             if new_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:  # only for RGW
@@ -997,7 +1046,10 @@ class ExportMgr:
                                             and old_fsal.fs_name == new_fsal.fs_name
                                             and old_export.path == new_export.path
                                             and old_export.pseudo == new_export.pseudo
-                                            and old_export.ceph_block == ganesha_export.ceph_block)
+                                            and old_export.ceph_block == ganesha_export.ceph_block
+                                            and old_export.log_block == ganesha_export.log_block
+                                            and old_export.nfsv4_block == ganesha_export.nfsv4_block
+                                            and old_export.delegations == ganesha_export.delegations)
 
         if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
             old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
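
A compact sketch of the section splitting that _change_export now performs on an incoming export JSON (key names match the diff above; the input values are illustrative):

    # Sketch: split a combined export JSON into the per-block dicts that
    # _apply_export consumes; ceph/log/nfsv4 are optional sections.
    export = {
        'export': {'export_id': 1, 'pseudo': '/nfsganesha', 'delegations': 'rw'},
        'ceph': {'async': True, 'zerocopy': False},
        'log': {'default_log_level': 'WARN',
                'components': {'fsal': 'debug', 'nfs4': 'debug'},
                'facility': {'name': 'file',
                             'destination': '/var/log/ceph/ganesha.log',
                             'enable': 'active'}},
        'nfsv4': {'delegations': True},
    }
    ceph_dict = export.pop('ceph', {})
    log_dict = export.pop('log', {})
    nfsv4_dict = export.pop('nfsv4', {})
    export = export.pop('export', export)  # unwrap when the 'export' key is present
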
diff --git a/src/pybind/mgr/nfs/ganesha_conf.py b/src/pybind/mgr/nfs/ganesha_conf.py
index 6946297cd759fd19e287e7334ac0b7d679418346..1c0fdfb987cb75d4aaa07979d16e0263d258842f 100644 (file)
@@ -113,14 +113,15 @@ class GaneshaConfParser:
                 value = self.stream()[:idx]
                 self.pos += idx + 1
             block_dict = RawBlock('%url', values={'value': value})
-            return block_dict
+            return ('%url', block_dict)
 
-        block_dict = RawBlock(self.parse_block_name().upper())
+        block_name = self.parse_block_name().upper()
+        block_dict = RawBlock(block_name)
         self.parse_block_body(block_dict)
         if self.stream()[0] != '}':
             raise Exception("No closing bracket '}' found at the end of block")
         self.pos += 1
-        return block_dict
+        return (block_name, block_dict)
 
     def parse_parameter_value(self, raw_value: str) -> Any:
         if raw_value.find(',') != -1:
@@ -164,7 +165,7 @@ class GaneshaConfParser:
                 self.parse_stanza(block_dict)
             elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket)
                                   or (not is_semicolon)):
-                block_dict.blocks.append(self.parse_block_or_section())
+                block_dict.blocks.append(self.parse_block_or_section()[1])
             else:
                 raise Exception("Malformed stanza: no semicolon found.")
 
@@ -172,9 +173,10 @@ class GaneshaConfParser:
                 raise Exception("Infinite loop while parsing block content")
 
-    def parse(self) -> List[RawBlock]:
+    def parse(self) -> Dict[str, RawBlock]:
-        blocks = []
+        blocks = {}
         while self.stream():
-            blocks.append(self.parse_block_or_section())
+            (block_name, block) = self.parse_block_or_section()
+            blocks[block_name] = block
         return blocks
 
 
@@ -381,7 +383,7 @@ class CephBlock:
         return result
 
     @classmethod
-    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'Export':
+    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'CephBlock':
         return cls(ex_dict.get('async', False),
                    ex_dict.get('zerocopy', False))
 
@@ -397,6 +399,145 @@ class CephBlock:
             return False
         return self.to_dict() == other.to_dict()
 
+class Facility:
+    def __init__(self,
+                 name: str,
+                 destination: str,
+                 enable: str):
+        self.name = name
+        self.destination = destination
+        self.enable = enable
+
+    @classmethod
+    def from_facility_block(cls, facility: RawBlock) -> 'Facility':
+        return cls(facility.values['name'],
+                   facility.values['destination'], facility.values['enable'])
+
+    def to_facility_block(self) -> RawBlock:
+        result = RawBlock("FACILITY", values={'name': self.name,
+                                              'destination': self.destination,
+                                              'enable': self.enable})
+        return result
+
+    @classmethod
+    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'Facility':
+        return cls(ex_dict['name'], ex_dict['destination'], ex_dict['enable'])
+
+    def to_dict(self) -> Dict[str, Any]:
+        values = {
+            'name': self.name,
+            'destination': self.destination,
+            'enable': self.enable
+        }
+        return values
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Facility):
+            return False
+        return self.to_dict() == other.to_dict()
+
+class Components:
+    def __init__(self,
+                 fsal: str,
+                 nfs4: str):
+        self.fsal = fsal
+        self.nfs4 = nfs4
+
+    @classmethod
+    def from_components_block(cls, components: RawBlock) -> 'Components':
+        return cls(components.values['fsal'], components.values['nfs4'])
+
+    def to_components_block(self) -> RawBlock:
+        result = RawBlock("COMPONENTS", values={'fsal': self.fsal, 'nfs4': self.nfs4})
+        return result
+
+    @classmethod
+    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'Components':
+        return cls(ex_dict['fsal'], ex_dict['nfs4'])
+
+    def to_dict(self) -> Dict[str, Any]:
+        values = {
+            'fsal': self.fsal,
+            'nfs4': self.nfs4
+        }
+        return values
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Components):
+            return False
+        return self.to_dict() == other.to_dict()
+
+class LogBlock:
+    def __init__(self,
+                 default_log_level: str,
+                 components: Components,
+                 facility: Facility):
+        self.default_log_level = default_log_level
+        self.components = components
+        self.facility = facility
+
+    @classmethod
+    def from_log_block(cls, log_block: RawBlock) -> 'LogBlock':
+        components_block = next(b for b in log_block.blocks if b.block_name == 'COMPONENTS')
+        facility_block = next(b for b in log_block.blocks if b.block_name == 'FACILITY')
+        return cls(log_block.values.get('default_log_level', None),
+                   Components.from_components_block(components_block),
+                   Facility.from_facility_block(facility_block))
+
+    def to_log_block(self) -> RawBlock:
+        result = RawBlock("LOG", values={'default_log_level': self.default_log_level})
+        result.blocks = [
+            self.components.to_components_block()
+            ] + [
+                self.facility.to_facility_block()
+            ]
+        return result
+
+    @classmethod
+    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'LogBlock':
+        return cls(ex_dict['default_log_level'],
+                   Components.from_dict(ex_dict['components']),
+                   Facility.from_dict(ex_dict['facility']))
+
+    def to_dict(self) -> Dict[str, Any]:
+        values = {
+            'default_log_level': self.default_log_level,
+            'components': self.components.to_dict(),
+            'facility': self.facility.to_dict()
+        }
+        return values
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, LogBlock):
+            return False
+        return self.to_dict() == other.to_dict()
+
+class NFSV4Block:
+    def __init__(self,
+                 delegations: bool):
+        self.delegations = delegations
+
+    @classmethod
+    def from_nfsv4_block(cls, nfsv4_block: RawBlock) -> 'NFSV4Block':
+        return cls(nfsv4_block.values.get('delegations', False))
+
+    def to_nfsv4_block(self) -> RawBlock:
+        result = RawBlock("NFSV4", values={'delegations': self.delegations})
+        return result
+
+    @classmethod
+    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'NFSV4Block':
+        return cls(ex_dict['delegations'])
+
+    def to_dict(self) -> Dict[str, Any]:
+        values = {
+            'delegations': self.delegations
+        }
+        return values
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, NFSV4Block):
+            return False
+        return self.to_dict() == other.to_dict()
+
 class Export:
     def __init__(
             self,
@@ -411,7 +552,8 @@ class Export:
             transports: List[str],
             fsal: FSAL,
             clients: Optional[List[Client]] = None,
-            sectype: Optional[List[str]] = None):
+            sectype: Optional[List[str]] = None,
+            delegations: Optional[str] = "none"):
         self.export_id = export_id
         self.path = path
         self.fsal = fsal
@@ -425,6 +567,7 @@ class Export:
         self.transports = transports
         self.clients: List[Client] = clients or []
         self.sectype = sectype
+        self.delegations = delegations
 
     @classmethod
     def from_export_block(cls, export_block: RawBlock, cluster_id: str) -> 'Export':
@@ -466,7 +609,8 @@ class Export:
                    FSAL.from_fsal_block(fsal_blocks[0]),
                    [Client.from_client_block(client)
                     for client in client_blocks],
-                   sectype=sectype)
+                   sectype=sectype,
+                   delegations=export_block.values.get("delegations", "none"))
 
     def to_export_block(self) -> RawBlock:
         values = {
@@ -479,6 +623,7 @@ class Export:
             'security_label': self.security_label,
             'protocols': self.protocols,
             'transports': self.transports,
+            'delegations': self.delegations
         }
         if self.sectype:
             values['SecType'] = self.sectype
@@ -504,7 +649,8 @@ class Export:
                    ex_dict.get('transports', ['TCP']),
                    FSAL.from_dict(ex_dict.get('fsal', {})),
                    [Client.from_dict(client) for client in ex_dict.get('clients', [])],
-                   sectype=ex_dict.get("sectype"))
+                   sectype=ex_dict.get("sectype"),
+                   delegations=ex_dict.get("delegations", "none"))
 
     def to_dict(self) -> Dict[str, Any]:
         values = {
@@ -518,7 +664,8 @@ class Export:
             'protocols': sorted([p for p in self.protocols]),
             'transports': sorted([t for t in self.transports]),
             'fsal': self.fsal.to_dict(),
-            'clients': [client.to_dict() for client in self.clients]
+            'clients': [client.to_dict() for client in self.clients],
+            "delegations": self.delegations
         }
         if self.sectype:
             values['sectype'] = self.sectype
@@ -565,6 +712,10 @@ class Export:
         for st in (self.sectype or []):
             _validate_sec_type(st)
 
+        valid_delegations = ["R", "RW", "NONE"]
+        if self.delegations.upper() not in valid_delegations:
+            raise NFSInvalidOperation(f'invalid delegations in export block: {self.delegations}')
+
     def __eq__(self, other: Any) -> bool:
         if not isinstance(other, Export):
             return False
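
Finally, a hedged round trip of the new block classes into ganesha config text; the nfs.ganesha_conf import path is an assumption about how the mgr module sits on the Python path, while format_block is the existing helper GaneshaExport.to_export_block() already uses:

    # Sketch: build LOG and NFSV4 blocks from dicts and render them with
    # format_block, mirroring GaneshaExport.to_export_block().
    from nfs.ganesha_conf import LogBlock, NFSV4Block, format_block  # path assumed

    log_block = LogBlock.from_dict({
        'default_log_level': 'WARN',
        'components': {'fsal': 'debug', 'nfs4': 'debug'},
        'facility': {'name': 'file',
                     'destination': '/var/log/ceph/ganesha.log',
                     'enable': 'active'},
    })
    nfsv4_block = NFSV4Block.from_dict({'delegations': True})
    print(format_block(log_block.to_log_block()))
    print(format_block(nfsv4_block.to_nfsv4_block()))
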
diff --git a/src/pybind/mgr/nfs/module.py b/src/pybind/mgr/nfs/module.py
index ff67eba64f96c0cb7bd862faf3dc1439e192a61a..331c63cac7938d9f5bf28c983de19fe13833a1ae 100644 (file)
@@ -40,7 +40,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
             client_addr: Optional[List[str]] = None,
             squash: str = 'none',
             sectype: Optional[List[str]] = None,
-            cmount_path: Optional[str] = "/"
+            cmount_path: Optional[str] = "/",
+            delegations: Optional[str] = "none"
     ) -> Dict[str, Any]:
         """Create a CephFS export"""
         earmark_resolver = CephFSEarmarkResolver(self)
@@ -55,7 +56,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
             addr=client_addr,
             sectype=sectype,
             cmount_path=cmount_path,
-            earmark_resolver=earmark_resolver
+            earmark_resolver=earmark_resolver,
+            delegations=delegations
         )
 
     @CLICommand('nfs export create rgw', perm='rw')