qa: Add nvmeof:upgrade suite (59608/head)
author    Vallari Agrawal <vallari.agrawal@ibm.com>  Wed, 17 Sep 2025 09:07:40 +0000 (14:37 +0530)
committer Vallari Agrawal <vallari.agrawal@ibm.com>  Wed, 17 Sep 2025 12:52:32 +0000 (18:22 +0530)
Signed-off-by: Vallari Agrawal <vallari.agrawal@ibm.com>
13 files changed:
qa/suites/nvmeof/upgrade/% [new file with mode: 0644]
qa/suites/nvmeof/upgrade/.qa [new symlink]
qa/suites/nvmeof/upgrade/0-clusters/.qa [new symlink]
qa/suites/nvmeof/upgrade/0-clusters/4-gateways-1-initiator.yaml [new file with mode: 0644]
qa/suites/nvmeof/upgrade/1-start-distro/.qa [new symlink]
qa/suites/nvmeof/upgrade/1-start-distro/1-start-centos_9.stream-squid.yaml [new file with mode: 0644]
qa/suites/nvmeof/upgrade/2-setup_subsystem.yaml [new file with mode: 0644]
qa/suites/nvmeof/upgrade/3-upgrade/.qa [new symlink]
qa/suites/nvmeof/upgrade/3-upgrade/simple.yaml [new file with mode: 0644]
qa/suites/nvmeof/upgrade/4-wait.yaml [new file with mode: 0644]
qa/suites/nvmeof/upgrade/5-workloads/.qa [new symlink]
qa/suites/nvmeof/upgrade/5-workloads/nvmeof-thrasher.yaml [new file with mode: 0644]
qa/tasks/nvmeof.py

diff --git a/qa/suites/nvmeof/upgrade/% b/qa/suites/nvmeof/upgrade/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/nvmeof/upgrade/.qa b/qa/suites/nvmeof/upgrade/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/nvmeof/upgrade/0-clusters/.qa b/qa/suites/nvmeof/upgrade/0-clusters/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/0-clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/nvmeof/upgrade/0-clusters/4-gateways-1-initiator.yaml b/qa/suites/nvmeof/upgrade/0-clusters/4-gateways-1-initiator.yaml
new file mode 100644 (file)
index 0000000..76f245f
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/0-clusters/4-gateways-1-initiator.yaml
@@ -0,0 +1,32 @@
+roles:
+- - client.0 # initiator
+- - host.a
+  - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.1
+  - ceph.nvmeof.nvmeof.a
+- - host.b
+  - mon.b
+  - mgr.y
+  - osd.2
+  - osd.3
+  - osd.4
+  - client.2
+  - ceph.nvmeof.nvmeof.b
+- - host.c
+  - mon.c
+  - osd.5
+  - osd.6
+  - osd.7
+  - client.3
+  - ceph.nvmeof.nvmeof.c
+- - host.d
+  - client.4
+  - ceph.nvmeof.nvmeof.d
+
+tasks:
+- install:
+    extra_packages:
+        - nvme-cli
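
A note on the layout above (not part of the commit): teuthology roles follow a "[cluster.]type.id" scheme, so ceph.nvmeof.nvmeof.a places an nvmeof gateway daemon with id nvmeof.a on host.a, giving four gateways plus a dedicated initiator on client.0. A minimal Python sketch of picking those gateway roles out of such a layout; the layout literal is abbreviated and the helper name is illustrative:

# Hypothetical sketch: pull the nvmeof gateway ids out of a teuthology-style
# roles layout like the one above (abbreviated). Roles follow
# "[cluster.]type.id", so "ceph.nvmeof.nvmeof.a" has daemon id "nvmeof.a".
ROLES = [
    ["client.0"],                                  # initiator
    ["host.a", "mon.a", "ceph.nvmeof.nvmeof.a"],
    ["host.b", "mon.b", "ceph.nvmeof.nvmeof.b"],
    ["host.c", "mon.c", "ceph.nvmeof.nvmeof.c"],
    ["host.d", "client.4", "ceph.nvmeof.nvmeof.d"],
]

def gateway_ids(roles):
    """Yield the daemon id of every gateway role, e.g. 'nvmeof.a'."""
    for node in roles:
        for role in node:
            if role.startswith("ceph.nvmeof."):
                yield role[len("ceph.nvmeof."):]

print(list(gateway_ids(ROLES)))  # ['nvmeof.a', 'nvmeof.b', 'nvmeof.c', 'nvmeof.d']
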
diff --git a/qa/suites/nvmeof/upgrade/1-start-distro/.qa b/qa/suites/nvmeof/upgrade/1-start-distro/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/1-start-distro/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/nvmeof/upgrade/1-start-distro/1-start-centos_9.stream-squid.yaml b/qa/suites/nvmeof/upgrade/1-start-distro/1-start-centos_9.stream-squid.yaml
new file mode 100644 (file)
index 0000000..8e3b778
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/1-start-distro/1-start-centos_9.stream-squid.yaml
@@ -0,0 +1,19 @@
+os_type: centos
+os_version: "9.stream"
+
+tasks:
+- cephadm:
+    image: quay.ceph.io/ceph-ci/ceph:squid-nvmeof
+    compiled_cephadm_branch: squid-nvmeof
+
+- nvmeof:
+    installer: host.a
+    rbd:
+      pool_name: mypool
+      image_name_prefix: myimage
+    gateway_config:
+      subsystems_count: 3
+      namespaces_count: 20
+
+- cephadm.wait_for_service:
+    service: nvmeof.mypool.mygroup0
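
For scale context: with subsystems_count: 3 and namespaces_count: 20 per subsystem, the task pre-creates one RBD image per namespace, per the total_images = int(self.namespaces_count) * int(self.subsystems_count) line visible in the qa/tasks/nvmeof.py hunk below. A quick sketch of that arithmetic; the "<prefix><n>" naming is an assumption for illustration, the real scheme lives in the task:

# Sizing arithmetic behind the gateway_config above; the image naming is an
# assumption (the actual naming logic is in qa/tasks/nvmeof.py).
subsystems_count = 3
namespaces_count = 20                 # per subsystem
image_name_prefix = "myimage"

total_images = namespaces_count * subsystems_count   # 60 RBD images in 'mypool'
images = [f"{image_name_prefix}{i}" for i in range(1, total_images + 1)]
print(total_images, images[0], images[-1])           # 60 myimage1 myimage60
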
diff --git a/qa/suites/nvmeof/upgrade/2-setup_subsystem.yaml b/qa/suites/nvmeof/upgrade/2-setup_subsystem.yaml
new file mode 100644 (file)
index 0000000..9721df5
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/2-setup_subsystem.yaml
@@ -0,0 +1,10 @@
+tasks:
+- workunit:
+    no_coverage_and_limits: true
+    clients:
+      client.0:
+        - nvmeof/setup_subsystem.sh
+        - nvmeof/basic_tests.sh
+    env:
+      RBD_POOL: mypool
+      RBD_IMAGE_PREFIX: myimage
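
The workunit task runs the listed scripts from qa/workunits on client.0 with the environment given. In the spirit of basic_tests.sh (the actual script is authoritative), a hedged sketch of the kind of initiator-side check involved, assuming only nvme-cli (installed via extra_packages above) and the nvmeof task's default subsystem NQN prefix:

# Hypothetical smoke check on the initiator: confirm nvme-cli sees the three
# subsystems configured above. The prefix matches the subsystem_nqn_prefix
# default in qa/tasks/nvmeof.py.
import subprocess

NQN_PREFIX = "nqn.2016-06.io.spdk:cnode"

out = subprocess.run(["nvme", "list-subsys"],
                     capture_output=True, text=True, check=True).stdout
seen = sum(line.count(NQN_PREFIX) for line in out.splitlines())
assert seen >= 3, f"expected at least 3 subsystems, saw {seen}"
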
diff --git a/qa/suites/nvmeof/upgrade/3-upgrade/.qa b/qa/suites/nvmeof/upgrade/3-upgrade/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/3-upgrade/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/nvmeof/upgrade/3-upgrade/simple.yaml b/qa/suites/nvmeof/upgrade/3-upgrade/simple.yaml
new file mode 100644 (file)
index 0000000..0a32c17
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/3-upgrade/simple.yaml
@@ -0,0 +1,21 @@
+overrides:
+  ceph:
+    log-ignorelist:
+      - CEPHADM_STRAY_DAEMON
+      - CEPHADM_FAILED_DAEMON
+      - CEPHADM_AGENT_DOWN
+    log-only-match:
+      - CEPHADM_
+
+tasks:
+- cephadm.shell:
+    env: [sha1]
+    mon.a:
+      # print status before upgrade
+      - ceph health detail
+      - ceph orch ps
+      - ceph -s
+      # upgrade
+      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
+      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
+      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
diff --git a/qa/suites/nvmeof/upgrade/4-wait.yaml b/qa/suites/nvmeof/upgrade/4-wait.yaml
new file mode 100644 (file)
index 0000000..ed33821
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/4-wait.yaml
@@ -0,0 +1,15 @@
+tasks:
+- cephadm.shell:
+    env: [sha1]
+    mon.a:
+      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
+      - ceph config get mgr mgr/cephadm/container_image_nvmeof
+      - sleep 60
+      # print status after upgrade
+      - ceph orch ps
+      - ceph orch ps | grep "nvmeof" || exit 1
+      - ceph orch ps | grep "nvmeof" | grep "running" || exit 1
+      - ceph versions
+      - ceph orch upgrade status
+      - ceph health detail
+      - ceph versions | jq -e '.overall | length == 1'
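
The one-line while loop above is dense; here is the same polling logic restated in Python, assuming only that ceph orch upgrade status emits the JSON fields the jq calls already rely on (in_progress, message):

# Rough Python equivalent of the wait loop above: poll until the upgrade is
# no longer in progress or an error message appears, printing status as we go.
import json
import subprocess
import time

def upgrade_status():
    out = subprocess.run(["ceph", "orch", "upgrade", "status"],
                         capture_output=True, text=True, check=True).stdout
    return json.loads(out)

status = upgrade_status()
while status.get("in_progress") and "Error" not in (status.get("message") or ""):
    subprocess.run(["ceph", "orch", "ps"])
    subprocess.run(["ceph", "versions"])
    subprocess.run(["ceph", "health", "detail"])
    time.sleep(30)
    status = upgrade_status()
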
diff --git a/qa/suites/nvmeof/upgrade/5-workloads/.qa b/qa/suites/nvmeof/upgrade/5-workloads/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/5-workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/nvmeof/upgrade/5-workloads/nvmeof-thrasher.yaml b/qa/suites/nvmeof/upgrade/5-workloads/nvmeof-thrasher.yaml
new file mode 100644 (file)
index 0000000..4d842ea
--- /dev/null
+++ b/qa/suites/nvmeof/upgrade/5-workloads/nvmeof-thrasher.yaml
@@ -0,0 +1,23 @@
+tasks:
+- workunit:
+    no_coverage_and_limits: true
+    clients:
+      client.0:
+        - nvmeof/basic_tests.sh
+    env:
+      RBD_POOL: mypool
+      RBD_IMAGE_PREFIX: myimage
+
+- nvmeof.thrash:
+    checker_host: 'client.0'
+
+- workunit:
+    no_coverage_and_limits: true
+    timeout: 30m
+    clients:
+      client.0:
+        - nvmeof/fio_test.sh --rbd_iostat
+    env:
+      RBD_POOL: mypool
+      IOSTAT_INTERVAL: '10'
+      RUNTIME: '600'
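
Ordering note: NvmeofThrasher is a Greenlet (see the class line in the qa/tasks/nvmeof.py hunk below), so nvmeof.thrash runs in the background and the fio workunit above executes while gateways are being stopped and restarted, with client.0 doubling as the checker host. A toy skeleton of that background pattern, assuming gevent; all names here are illustrative:

# Toy skeleton of a background thrasher in the gevent Greenlet style that
# NvmeofThrasher follows: spawn it, run the workload, then stop and join.
import random
from gevent import Greenlet, sleep

class ToyThrasher(Greenlet):
    def __init__(self, daemons, interval=30):
        super().__init__()
        self.daemons = daemons
        self.interval = interval
        self.stopping = False

    def _run(self):                       # Greenlet entry point
        while not self.stopping:
            victim = random.choice(self.daemons)
            print(f"thrashing {victim}")  # stand-in for a real stop/start
            sleep(self.interval)

thrasher = ToyThrasher(["nvmeof.a", "nvmeof.b", "nvmeof.c", "nvmeof.d"])
thrasher.start()
# ... run the fio workload here ...
thrasher.stopping = True
thrasher.join()
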
diff --git a/qa/tasks/nvmeof.py b/qa/tasks/nvmeof.py
index 591f1b60db885197321d48f8bfd6cd6a06517c66..4fc1973c93e6fcf20ea1234d906ab916f4d7ab9a 100644 (file)
--- a/qa/tasks/nvmeof.py
+++ b/qa/tasks/nvmeof.py
@@ -61,6 +61,7 @@ class Nvmeof(Task):
 
         gateway_config = self.config.get('gateway_config', {})
         self.cli_image = gateway_config.get('cli_image', 'quay.io/ceph/nvmeof-cli:latest')
+        self.enable_groups = bool(gateway_config.get('enable_groups', True))
         self.groups_count = gateway_config.get('groups_count', 1)
         self.groups_prefix = gateway_config.get('groups_prefix', 'mygroup') 
         self.nqn_prefix = gateway_config.get('subsystem_nqn_prefix', 'nqn.2016-06.io.spdk:cnode')
@@ -114,16 +115,22 @@ class Nvmeof(Task):
                 'rbd', 'pool', 'init', poolname
             ])
 
-            group_to_nodes = defaultdict(list)
-            for index, node in enumerate(nodes):
-                group_name = self.groups_prefix + str(index % int(self.groups_count))
-                group_to_nodes[group_name] += [node]
-            for group_name in group_to_nodes:
-                gp_nodes = group_to_nodes[group_name]
-                log.info(f'[nvmeof]: ceph orch apply nvmeof {poolname} {group_name}')
+            if self.enable_groups:
+                group_to_nodes = defaultdict(list)
+                for index, node in enumerate(nodes):
+                    group_name = self.groups_prefix + str(index % int(self.groups_count))
+                    group_to_nodes[group_name] += [node]
+                for group_name in group_to_nodes:
+                    gp_nodes = group_to_nodes[group_name]
+                    log.info(f'[nvmeof]: ceph orch apply nvmeof {poolname} {group_name}')
+                    _shell(self.ctx, self.cluster_name, self.remote, [
+                        'ceph', 'orch', 'apply', 'nvmeof', poolname, group_name,
+                        '--placement', ';'.join(gp_nodes)
+                    ])
+            else:
                 _shell(self.ctx, self.cluster_name, self.remote, [
-                    'ceph', 'orch', 'apply', 'nvmeof', poolname, group_name,
-                    '--placement', ';'.join(gp_nodes)
+                        'ceph', 'orch', 'apply', 'nvmeof', poolname,
+                        '--placement', ';'.join(nodes)
                 ])
 
             total_images = int(self.namespaces_count) * int(self.subsystems_count)
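
The behavioral core of this hunk: with enable_groups left at its default of True, gateways are spread round-robin across groups_count groups (index % groups_count), and each group becomes one "ceph orch apply nvmeof <pool> <group>" with a host placement list; with enable_groups false, a single ungrouped service spans all nodes. The assignment in isolation, using this suite's defaults:

# The round-robin group assignment from the hunk above, in isolation, with
# this suite's defaults (groups_count=1, groups_prefix='mygroup').
from collections import defaultdict

nodes = ["host.a", "host.b", "host.c", "host.d"]
groups_prefix, groups_count = "mygroup", 1

group_to_nodes = defaultdict(list)
for index, node in enumerate(nodes):
    group_to_nodes[groups_prefix + str(index % groups_count)].append(node)

print(dict(group_to_nodes))
# {'mygroup0': ['host.a', 'host.b', 'host.c', 'host.d']}
# -> a single 'ceph orch apply nvmeof mypool mygroup0' spanning all four
#    hosts, which is what cephadm.wait_for_service above waits for
#    (nvmeof.mypool.mygroup0).
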
@@ -287,7 +294,7 @@ class NvmeofThrasher(Thrasher, Greenlet):
     - workunit:
         clients:
             client.3:
-            - rbd/nvmeof_fio_test.sh --rbd_iostat
+            - nvmeof/fio_test.sh --rbd_iostat
         env:
             RBD_POOL: mypool
             IOSTAT_INTERVAL: '10'