git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
test fix
author     Venky Shankar <vshankar@redhat.com>
           Fri, 17 Oct 2025 11:06:49 +0000 (11:06 +0000)
committer  Venky Shankar <vshankar@redhat.com>
           Tue, 28 Oct 2025 05:24:28 +0000 (05:24 +0000)
Signed-off-by: Venky Shankar <vshankar@redhat.com>
qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml
qa/tasks/ganesha_client.py
qa/tasks/ganesha_reconf.py

diff --git a/qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml b/qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml
index 0a059d60037f1d065467055525ee1b53a72c1b39..bfe94748e4347191dfe9b2522326bebb9e8953bf 100644 (file)
--- a/qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml
+++ b/qa/suites/fs/nfs-ganesha/tasks/0-create-export.yaml
@@ -39,7 +39,8 @@ tasks:
                       "fs_name": "cephfs",
                       "cmount_path": "/"
                   },
-                  "clients": []
+                  "clients": [],
+                  "delegations": "R"
               },
               "log": {
                   "default_log_level": "WARN",
diff --git a/qa/tasks/ganesha_client.py b/qa/tasks/ganesha_client.py
index e30329e86f2930141c94c4986f7fa20b45631825..f2c424227822f6d4ba126c5888678b2ee9b7daad 100644 (file)
--- a/qa/tasks/ganesha_client.py
+++ b/qa/tasks/ganesha_client.py
@@ -62,39 +62,36 @@ class GaneshaClient(Task):
             pseudo_path = client_config['pseudo_path']
             nfs_version = client_config.get('version', 'latest')
 
-            try:
-                first_mon = misc.get_first_mon(self.ctx, None)
-                (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
-
-                proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
-                                       stdout=StringIO(), wait=True)
-                res = proc.stdout.getvalue()
-                export_json = json.loads(res)
-                log.debug(f'export_json: {export_json}')
-
-                proc = mon0_remote.run(args=['ceph', 'nfs', 'cluster', 'info', cluster_id],
-                                       stdout=StringIO(), wait=True)
-                res = proc.stdout.getvalue()
-                cluster_info = json.loads(res)
-                log.debug(f'cluster_info: {cluster_info}')
-
-                info_output = cluster_info[cluster_id]['backend'][0]
-                port = info_output['port']
-                ip = info_output['ip']
-
-                mntpt = os.path.join(test_dir, f'mnt.{id_}')
-                remote.run(args=['mkdir', '-p', mntpt], timeout=60)
-                if nfs_version == 'latest':
-                    remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
-                                     f'port={port}', f'{ip}:{pseudo_path}', mntpt])
-                else:
-                    remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
-                                     f'port={port},vers={nfs_version}', f'{ip}:{pseudo_path}', mntpt])
-                remote.run(args=['sudo', 'chmod', '1777', mntpt], timeout=60)
-                remote.run(args=['stat', mntpt])
-                mounts[id_] = (remote, mntpt)
-            except Exception as e:
-                log.error(f'failed: {e}')
+            first_mon = misc.get_first_mon(self.ctx, None)
+            (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
+
+            proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
+                                   stdout=StringIO(), wait=True)
+            res = proc.stdout.getvalue()
+            export_json = json.loads(res)
+            log.debug(f'export_json: {export_json}')
+
+            proc = mon0_remote.run(args=['ceph', 'nfs', 'cluster', 'info', cluster_id],
+                                   stdout=StringIO(), wait=True)
+            res = proc.stdout.getvalue()
+            cluster_info = json.loads(res)
+            log.debug(f'cluster_info: {cluster_info}')
+
+            info_output = cluster_info[cluster_id]['backend'][0]
+            port = info_output['port']
+            ip = info_output['ip']
+
+            mntpt = os.path.join(test_dir, f'mnt.{id_}')
+            remote.run(args=['mkdir', '-p', mntpt], timeout=60)
+            if nfs_version == 'latest':
+                remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
+                                 f'port={port}', f'{ip}:{pseudo_path}', mntpt])
+            else:
+                remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
+                                 f'port={port},vers={nfs_version}', f'{ip}:{pseudo_path}', mntpt])
+            remote.run(args=['sudo', 'chmod', '1777', mntpt], timeout=60)
+            remote.run(args=['stat', mntpt])
+            mounts[id_] = (remote, mntpt)
         self.mounts = mounts
 
     def end(self):
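
Stripped of the teuthology remote plumbing, the mount path added above amounts to the following standalone sketch. The function name is illustrative; the CLI commands and mount options mirror the task code, with os.makedirs standing in for `mkdir -p` and the final `stat` check omitted:

```python
# Minimal sketch of the flow above: resolve the ganesha backend's ip/port from
# `ceph nfs cluster info`, then mount the export's pseudo path over NFS.
import json
import os
import subprocess

def mount_nfs_export(cluster_id, pseudo_path, mntpt, nfs_version="latest"):
    info = json.loads(subprocess.run(
        ["ceph", "nfs", "cluster", "info", cluster_id],
        check=True, capture_output=True, text=True).stdout)
    backend = info[cluster_id]["backend"][0]
    ip, port = backend["ip"], backend["port"]

    os.makedirs(mntpt, exist_ok=True)

    # 'latest' lets mount.nfs negotiate the protocol version; anything else
    # gets pinned explicitly with vers=.
    opts = f"port={port}" if nfs_version == "latest" else f"port={port},vers={nfs_version}"
    subprocess.run(["sudo", "mount", "-t", "nfs", "-o", opts,
                    f"{ip}:{pseudo_path}", mntpt], check=True)
    subprocess.run(["sudo", "chmod", "1777", mntpt], check=True)
```
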
diff --git a/qa/tasks/ganesha_reconf.py b/qa/tasks/ganesha_reconf.py
index 452c02ac17e3aea2d362ceb957100fadc490250e..b19ecf9f7818d72ad6fe2238fcdf22c871652cf8 100644 (file)
--- a/qa/tasks/ganesha_reconf.py
+++ b/qa/tasks/ganesha_reconf.py
@@ -32,44 +32,42 @@ class GaneshaReconf(Task):
         deep_merge(ganesha_config, overrides)
         log.info(f'ganesha_config is {ganesha_config}')
 
-        try:
-            first_mon = misc.get_first_mon(self.ctx, None)
-            (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
-
-            cluster_id = ganesha_config['cluster_id']
-            pseudo_path = ganesha_config['pseudo_path']
-
-            proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
-                                   stdout=StringIO(), wait=True)
-            res = proc.stdout.getvalue()
-            export_json = json.loads(res)
-            log.debug(f'export_json: {export_json}')
-
-            ceph_section = {'async': False, 'zerocopy': False}
-            is_async = ganesha_config.get('async', False)
-            if is_async:
-                ceph_section["async"] = True
-            is_zerocopy = ganesha_config.get('zerocopy', False)
-            if is_zerocopy:
-                ceph_section["zerocopy"] = True
-
-            nfsv4_block = {}
-            delegations = ganesha_config.get('delegations', 'none')
-            export_json['delegations'] = delegations
-            nfsv4_block['delegations'] = False if delegations == 'none' else True
-
-            new_export = {}
-            if "export" in export_json.keys():
-                new_export = export_json
-            else:
-                new_export["export"] = export_json
-            new_export["ceph"] = ceph_section
-
-            log.debug(f'new_export is {json.dumps(new_export)}')
-            mon0_remote.run(args=['ceph', 'nfs', 'export', 'apply', cluster_id, "-i", "-"],
-                            stdin=json.dumps(new_export))
-        except Exception as e:
-                log.error(f'failed: {e}')
+        first_mon = misc.get_first_mon(self.ctx, None)
+        (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
+
+        cluster_id = ganesha_config['cluster_id']
+        pseudo_path = ganesha_config['pseudo_path']
+
+        proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
+                               stdout=StringIO(), wait=True)
+        res = proc.stdout.getvalue()
+        export_json = json.loads(res)
+        log.debug(f'export_json: {export_json}')
+
+        ceph_section = {'async': False, 'zerocopy': False}
+        is_async = ganesha_config.get('async', False)
+        if is_async:
+            ceph_section["async"] = True
+        is_zerocopy = ganesha_config.get('zerocopy', False)
+        if is_zerocopy:
+            ceph_section["zerocopy"] = True
+
+        nfsv4_block = {}
+        delegations = ganesha_config.get('delegations', 'none')
+        nfsv4_block['delegations'] = False if delegations == 'none' else True
+
+        new_export = {}
+        if "export" in export_json.keys():
+            new_export = export_json
+        else:
+            new_export["export"] = export_json
+        new_export["export"]["delegations"] = delegations
+        new_export["ceph"] = ceph_section
+        new_export["nfsv4"] = nfsv4_block
+
+        log.debug(f'new_export is {json.dumps(new_export)}')
+        mon0_remote.run(args=['ceph', 'nfs', 'export', 'apply', cluster_id, "-i", "-"],
+                        stdin=json.dumps(new_export))
 
     def end(self):
         super(GaneshaReconf, self).end()
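
For reference, the JSON that the updated task pipes into `ceph nfs export apply <cluster_id> -i -` has roughly the shape below; the concrete values are illustrative, and the nested "export" dict is whatever `ceph nfs export info` returned, wrapped if it was not already under an "export" key:

```python
# Illustrative payload built by the ganesha_reconf changes above.
new_export = {
    "export": {
        # ...existing export fields (pseudo path, FSAL/cephfs settings, clients, ...)
        "delegations": "R",      # from the task's 'delegations' option
    },
    "ceph": {
        "async": False,          # True when the task config sets 'async'
        "zerocopy": False,       # True when the task config sets 'zerocopy'
    },
    "nfsv4": {
        "delegations": True,     # False only when delegations == 'none'
    },
}
```
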