git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
qa: Add "auto_pool_create" to nvmeof_initiator 67641/head
authorVallari Agrawal <vallari.agrawal@ibm.com>
Wed, 4 Mar 2026 06:21:00 +0000 (11:51 +0530)
committerTomer Haskalovitch <tomer.haska@ibm.com>
Wed, 11 Mar 2026 19:18:36 +0000 (21:18 +0200)
While deploying gateways with "ceph orch apply nvmeof",
--pool is now optional. If it is not passed, a pool
named ".nvmeof" will be created automatically.

In the nvmeof task, setting "auto_pool_create: True" skips
passing --pool to "ceph orch apply nvmeof".

Signed-off-by: Vallari Agrawal <vallari.agrawal@ibm.com>
qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml
qa/tasks/nvmeof.py

index 29b67c84d70262d65cc74f1b7d1d32f5bd876563..aadcfeed0767c350be21681b8fad8b3179c57661 100644 (file)
@@ -3,15 +3,16 @@ tasks:
     installer: host.a
     gw_image: quay.io/ceph/nvmeof:devel # "default" is the image cephadm defaults to; change to test specific nvmeof images, example "latest"
     rbd:
-      pool_name: mypool
+      # pool_name: .nvmeof
       image_name_prefix: myimage
     gateway_config:
       subsystems_count: 3
       namespaces_count: 20
       cli_image: quay.io/ceph/nvmeof-cli:devel
+      auto_pool_create: True
 
 - cephadm.wait_for_service:
-    service: nvmeof.mypool.mygroup0
+    service: nvmeof.nvmeof.mygroup0
     refresh: True
 
 - workunit:
@@ -20,7 +21,7 @@ tasks:
       client.0:
         - nvmeof/setup_subsystem.sh
     env:
-      RBD_POOL: mypool
+      RBD_POOL: .nvmeof
       RBD_IMAGE_PREFIX: myimage
 
 - workunit:
@@ -34,6 +35,6 @@ tasks:
         - nvmeof/basic_tests.sh
         - nvmeof/fio_test.sh --start_ns 31 --end_ns 60
     env:
-      RBD_POOL: mypool
+      RBD_POOL: .nvmeof
       IOSTAT_INTERVAL: '10'
       RUNTIME: '600'
index 85189e03878cd1387bd701117342e3b5d2ce7c05..54b9b83d5dc205be3ebf367d2ac0ad79e660d500 100644 (file)
@@ -84,6 +84,9 @@ class Nvmeof(Task):
         self.port = gateway_config.get('port', '4420')
         self.srport = gateway_config.get('srport', '5500')
         self.create_mtls_secrets = gateway_config.get('create_mtls_secrets', False)
+        self.auto_pool_create = gateway_config.get('auto_pool_create', False)
+        if self.auto_pool_create:
+            self.poolname = ".nvmeof"
 
     def deploy_nvmeof(self):
         """
@@ -117,15 +120,16 @@ class Nvmeof(Task):
 
             poolname = self.poolname
 
-            log.info(f'[nvmeof]: ceph osd pool create {poolname}')
-            _shell(self.ctx, self.cluster_name, self.remote, [
-                'ceph', 'osd', 'pool', 'create', poolname
-            ])
+            if not self.auto_pool_create:
+                log.info(f'[nvmeof]: ceph osd pool create {poolname}')
+                _shell(self.ctx, self.cluster_name, self.remote, [
+                    'ceph', 'osd', 'pool', 'create', poolname
+                ])
 
-            log.info(f'[nvmeof]: rbd pool init {poolname}')
-            _shell(self.ctx, self.cluster_name, self.remote, [
-                'rbd', 'pool', 'init', poolname
-            ])
+                log.info(f'[nvmeof]: rbd pool init {poolname}')
+                _shell(self.ctx, self.cluster_name, self.remote, [
+                    'rbd', 'pool', 'init', poolname
+                ])
 
             if self.enable_groups:
                 group_to_nodes = defaultdict(list)
@@ -134,11 +138,18 @@ class Nvmeof(Task):
                     group_to_nodes[group_name] += [node]
                 for group_name in group_to_nodes:
                     gp_nodes = group_to_nodes[group_name]
-                    log.info(f'[nvmeof]: ceph orch apply nvmeof {poolname} {group_name}')
-                    _shell(self.ctx, self.cluster_name, self.remote, [
-                        'ceph', 'orch', 'apply', 'nvmeof', poolname, group_name,
-                        '--placement', ';'.join(gp_nodes)
-                    ])
+                    if self.auto_pool_create:
+                        log.info(f'[nvmeof]: ceph orch apply nvmeof {group_name}') 
+                        _shell(self.ctx, self.cluster_name, self.remote, [
+                            'ceph', 'orch', 'apply', 'nvmeof', '--group', group_name,
+                            '--placement', ';'.join(gp_nodes)
+                        ])
+                    else:
+                        log.info(f'[nvmeof]: ceph orch apply nvmeof {poolname} {group_name}')
+                        _shell(self.ctx, self.cluster_name, self.remote, [
+                            'ceph', 'orch', 'apply', 'nvmeof', '--pool',poolname, '--group', group_name,
+                            '--placement', ';'.join(gp_nodes)
+                        ])
             else:
                 _shell(self.ctx, self.cluster_name, self.remote, [
                         'ceph', 'orch', 'apply', 'nvmeof', poolname,