qa/suites/nvmeof/thrasher: use 120 subsystems and 8 ns each
author    Vallari Agrawal <vallari.agrawal@ibm.com>
          Tue, 19 Nov 2024 01:22:06 +0000 (06:52 +0530)
committer Vallari Agrawal <vallari.agrawal@ibm.com>
          Tue, 26 Nov 2024 08:11:01 +0000 (13:41 +0530)
For thrasher test:
1. Run it on 120 subsystems with 8 namespaces each
2. Run FIO for 20 mins (instead of 15 mins)
3. Run FIO on a few randomly picked devices
    (using `--random_devices 200`; see the scale note below)

Signed-off-by: Vallari Agrawal <vallari.agrawal@ibm.com>
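
Note on scale: with 120 subsystems and 8 namespaces each, the gateways expose 960 namespaces in total, and the FIO workload samples 200 of them for a 1200-second run. A minimal sketch of that arithmetic (illustrative only, not part of the change):

    # Scale implied by the new thrasher configuration (illustrative sketch).
    subsystems=120
    namespaces_per_subsystem=8
    total_namespaces=$((subsystems * namespaces_per_subsystem))   # 960
    echo "FIO exercises 200 of ${total_namespaces} namespaces for 1200s"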
qa/suites/nvmeof/thrash/gateway-initiator-setup/120-subsys-8-namespace.yaml [new file with mode: 0644]
qa/suites/nvmeof/thrash/gateway-initiator-setup/3-subsys-60-namespace.yaml [deleted file]
qa/suites/nvmeof/thrash/workloads/fio.yaml
qa/workunits/nvmeof/fio_test.sh

diff --git a/qa/suites/nvmeof/thrash/gateway-initiator-setup/120-subsys-8-namespace.yaml b/qa/suites/nvmeof/thrash/gateway-initiator-setup/120-subsys-8-namespace.yaml
new file mode 100644 (file)
index 0000000..0f7ac01
--- /dev/null
@@ -0,0 +1,24 @@
+tasks:
+- nvmeof:
+    installer: host.a
+    gw_image: quay.io/ceph/nvmeof:latest # "default" is the image cephadm defaults to; change to test specific nvmeof images, example "latest"
+    rbd:
+      pool_name: mypool
+      image_name_prefix: myimage
+    gateway_config:
+      subsystems_count: 120
+      namespaces_count: 8 # each subsystem
+      cli_image: quay.io/ceph/nvmeof-cli:latest
+
+- cephadm.wait_for_service:
+    service: nvmeof.mypool.mygroup0
+
+- workunit:
+    no_coverage_and_limits: true
+    clients:
+      client.0:
+        - nvmeof/setup_subsystem.sh
+        - nvmeof/basic_tests.sh
+    env:
+      RBD_POOL: mypool
+      RBD_IMAGE_PREFIX: myimage
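
For context, the counts above translate into the subsystem/namespace layout sketched below; the NQN scheme and the per-namespace RBD image naming are assumptions for illustration, not taken from this change:

    # Illustrative layout only: NQN pattern and image numbering are assumed, not from the diff.
    pool=mypool
    prefix=myimage
    for ((s = 1; s <= 120; s++)); do
        nqn="nqn.2016-06.io.spdk:cnode${s}"            # hypothetical subsystem NQN
        for ((n = 1; n <= 8; n++)); do
            img="${prefix}$(( (s - 1) * 8 + n ))"      # hypothetical image name
            echo "subsystem ${nqn}: ns ${n} -> rbd ${pool}/${img}"
        done
    done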
diff --git a/qa/suites/nvmeof/thrash/gateway-initiator-setup/3-subsys-60-namespace.yaml b/qa/suites/nvmeof/thrash/gateway-initiator-setup/3-subsys-60-namespace.yaml
deleted file mode 100644 (file)
index b4755a6..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-tasks:
-- nvmeof:
-    installer: host.a
-    gw_image: quay.io/ceph/nvmeof:latest # "default" is the image cephadm defaults to; change to test specific nvmeof images, example "latest"
-    rbd:
-      pool_name: mypool
-      image_name_prefix: myimage
-    gateway_config:
-      subsystems_count: 3
-      namespaces_count: 20 # each subsystem
-      cli_image: quay.io/ceph/nvmeof-cli:latest
-
-- cephadm.wait_for_service:
-    service: nvmeof.mypool.mygroup0
-
-- workunit:
-    no_coverage_and_limits: true
-    clients:
-      client.0:
-        - nvmeof/setup_subsystem.sh
-        - nvmeof/basic_tests.sh
-    env:
-      RBD_POOL: mypool
-      RBD_IMAGE_PREFIX: myimage
diff --git a/qa/suites/nvmeof/thrash/workloads/fio.yaml b/qa/suites/nvmeof/thrash/workloads/fio.yaml
index e5f130ebe2666280dbfe11cf82dcb99c0ef86c44..91bd99c76dbc07bd415805e2cf746d1e0a083fd4 100644 (file)
@@ -1,11 +1,11 @@
 tasks:
 - workunit:
     no_coverage_and_limits: true
-    timeout: 30m
+    timeout: 60m
     clients:
       client.0:
-        - nvmeof/fio_test.sh
+        - nvmeof/fio_test.sh --random_devices 200
     env:
       RBD_POOL: mypool
       IOSTAT_INTERVAL: '10'
-      RUNTIME: '900'
+      RUNTIME: '1200'
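
Taken together with the environment above, the workunit on client.0 ends up running the test script roughly as follows (an approximation of the effective call; the workunit wrapper itself is not shown in this diff):

    # Approximate effective invocation (illustrative); paths/wrapping differ under teuthology.
    RBD_POOL=mypool IOSTAT_INTERVAL=10 RUNTIME=1200 \
        qa/workunits/nvmeof/fio_test.sh --random_devices 200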
diff --git a/qa/workunits/nvmeof/fio_test.sh b/qa/workunits/nvmeof/fio_test.sh
index 57d355a63183fb513d943103ed2a84fba2c1d3b0..03fb58693bd02422eed2648237fcaa1b4b5d8f01 100755 (executable)
@@ -5,6 +5,7 @@ sudo yum -y install sysstat
 
 namespace_range_start=
 namespace_range_end=
+random_devices_count=
 rbd_iostat=false
 
 while [[ $# -gt 0 ]]; do
@@ -17,6 +18,10 @@ while [[ $# -gt 0 ]]; do
             namespace_range_end=$2
             shift 2
             ;;
+        --random_devices)
+            random_devices_count=$2
+            shift 2
+            ;;
         --rbd_iostat)
             rbd_iostat=true
             shift
@@ -37,6 +42,8 @@ all_drives_list=$(sudo nvme list --output-format=json |
 # run on first 3 namespaces here.
 if [ "$namespace_range_start" ] || [ "$namespace_range_end" ]; then
     selected_drives=$(echo "${all_drives_list[@]}" | sed -n "${namespace_range_start},${namespace_range_end}p")
+elif [ "$random_devices_count" ]; then
+    selected_drives=$(echo "${all_drives_list[@]}" | shuf -n "$random_devices_count")
 else
     selected_drives="${all_drives_list[@]}"
 fi
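
The new branch simply samples the device list with shuf. A standalone sketch of the same selection pattern is below; the jq filter over `nvme list` output is an assumption, since the real list-building pipeline is truncated in this hunk:

    # Standalone sketch of random device selection (illustrative; jq filter assumed).
    random_devices_count=200
    all_drives_list=$(sudo nvme list --output-format=json | jq -r '.Devices[].DevicePath')
    selected_drives=$(echo "${all_drives_list}" | shuf -n "${random_devices_count}")
    echo "${selected_drives}"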