From: Vallari Agrawal Date: Wed, 4 Mar 2026 06:21:00 +0000 (+0530) Subject: qa: Add "auto_pool_create" to nvmeof_initiator X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=97f4043a646e52d02374daeacbf1bfedc046b63c;p=ceph.git qa: Add "auto_pool_create" to nvmeof_initiator While deploying gateways with "ceph orch apply nvmeof", --pool can be optional now. If not passed, a pool with name ".nvmeof" would automatically be created. In nvmeof task, "auto_pool_create: True" would skip --pool in "ceph orch apply nvmeof". Signed-off-by: Vallari Agrawal --- diff --git a/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml b/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml index 29b67c84d70..aadcfeed076 100644 --- a/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml +++ b/qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml @@ -3,15 +3,16 @@ tasks: installer: host.a gw_image: quay.io/ceph/nvmeof:devel # "default" is the image cephadm defaults to; change to test specific nvmeof images, example "latest" rbd: - pool_name: mypool + # pool_name: .nvmeof image_name_prefix: myimage gateway_config: subsystems_count: 3 namespaces_count: 20 cli_image: quay.io/ceph/nvmeof-cli:devel + auto_pool_create: True - cephadm.wait_for_service: - service: nvmeof.mypool.mygroup0 + service: nvmeof.nvmeof.mygroup0 refresh: True - workunit: @@ -20,7 +21,7 @@ tasks: client.0: - nvmeof/setup_subsystem.sh env: - RBD_POOL: mypool + RBD_POOL: .nvmeof RBD_IMAGE_PREFIX: myimage - workunit: @@ -34,6 +35,6 @@ tasks: - nvmeof/basic_tests.sh - nvmeof/fio_test.sh --start_ns 31 --end_ns 60 env: - RBD_POOL: mypool + RBD_POOL: .nvmeof IOSTAT_INTERVAL: '10' RUNTIME: '600' diff --git a/qa/tasks/nvmeof.py b/qa/tasks/nvmeof.py index 85189e03878..54b9b83d5dc 100644 --- a/qa/tasks/nvmeof.py +++ b/qa/tasks/nvmeof.py @@ -84,6 +84,9 @@ class Nvmeof(Task): self.port = gateway_config.get('port', '4420') self.srport = gateway_config.get('srport', '5500') self.create_mtls_secrets = 
gateway_config.get('create_mtls_secrets', False) + self.auto_pool_create = gateway_config.get('auto_pool_create', False) + if self.auto_pool_create: + self.poolname = ".nvmeof" def deploy_nvmeof(self): """ @@ -117,15 +120,16 @@ class Nvmeof(Task): poolname = self.poolname - log.info(f'[nvmeof]: ceph osd pool create {poolname}') - _shell(self.ctx, self.cluster_name, self.remote, [ - 'ceph', 'osd', 'pool', 'create', poolname - ]) + if not self.auto_pool_create: + log.info(f'[nvmeof]: ceph osd pool create {poolname}') + _shell(self.ctx, self.cluster_name, self.remote, [ + 'ceph', 'osd', 'pool', 'create', poolname + ]) - log.info(f'[nvmeof]: rbd pool init {poolname}') - _shell(self.ctx, self.cluster_name, self.remote, [ - 'rbd', 'pool', 'init', poolname - ]) + log.info(f'[nvmeof]: rbd pool init {poolname}') + _shell(self.ctx, self.cluster_name, self.remote, [ + 'rbd', 'pool', 'init', poolname + ]) if self.enable_groups: group_to_nodes = defaultdict(list) @@ -134,11 +138,18 @@ class Nvmeof(Task): group_to_nodes[group_name] += [node] for group_name in group_to_nodes: gp_nodes = group_to_nodes[group_name] - log.info(f'[nvmeof]: ceph orch apply nvmeof {poolname} {group_name}') - _shell(self.ctx, self.cluster_name, self.remote, [ - 'ceph', 'orch', 'apply', 'nvmeof', poolname, group_name, - '--placement', ';'.join(gp_nodes) - ]) + if self.auto_pool_create: + log.info(f'[nvmeof]: ceph orch apply nvmeof {group_name}') + _shell(self.ctx, self.cluster_name, self.remote, [ + 'ceph', 'orch', 'apply', 'nvmeof', '--group', group_name, + '--placement', ';'.join(gp_nodes) + ]) + else: + log.info(f'[nvmeof]: ceph orch apply nvmeof {poolname} {group_name}') + _shell(self.ctx, self.cluster_name, self.remote, [ + 'ceph', 'orch', 'apply', 'nvmeof', '--pool', poolname, '--group', group_name, + '--placement', ';'.join(gp_nodes) + ]) else: _shell(self.ctx, self.cluster_name, self.remote, [ 'ceph', 'orch', 'apply', 'nvmeof', poolname,