git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa: move nvmeof shell scripts to qa/workunits/nvmeof
authorVallari Agrawal <val.agl002@gmail.com>
Wed, 21 Aug 2024 09:38:21 +0000 (15:08 +0530)
committerVallari Agrawal <val.agl002@gmail.com>
Mon, 2 Sep 2024 11:34:55 +0000 (17:04 +0530)
Move all scripts qa/workunits/rbd/nvmeof_*.sh
to qa/workunits/nvmeof/*.sh

Signed-off-by: Vallari Agrawal <val.agl002@gmail.com>
15 files changed:
qa/suites/nvmeof/basic/workloads/nvmeof_initiator.yaml
qa/suites/nvmeof/basic/workloads/nvmeof_namespaces.yaml
qa/suites/nvmeof/basic/workloads/nvmeof_scalability.yaml
qa/suites/nvmeof/thrash/gateway-initiator-setup/3-subsys-60-namespace.yaml
qa/suites/nvmeof/thrash/workloads/fio.yaml
qa/workunits/nvmeof/basic_tests.sh [new file with mode: 0755]
qa/workunits/nvmeof/fio_test.sh [new file with mode: 0755]
qa/workunits/nvmeof/namespace_test.sh [new file with mode: 0755]
qa/workunits/nvmeof/scalability_test.sh [new file with mode: 0755]
qa/workunits/nvmeof/setup_subsystem.sh [new file with mode: 0755]
qa/workunits/rbd/nvmeof_basic_tests.sh [deleted file]
qa/workunits/rbd/nvmeof_fio_test.sh [deleted file]
qa/workunits/rbd/nvmeof_namespace_test.sh [deleted file]
qa/workunits/rbd/nvmeof_scalability_test.sh [deleted file]
qa/workunits/rbd/nvmeof_setup_subsystem.sh [deleted file]

index 7774ba789222c6ce5adc0bbe07c83bc2b5f7b80e..3e925ee6a13f2b4bbe4c199def109378448561c2 100644 (file)
@@ -17,7 +17,7 @@ tasks:
     no_coverage_and_limits: true
     clients:
       client.2:
-        - rbd/nvmeof_setup_subsystem.sh
+        - nvmeof/setup_subsystem.sh
     env:
       RBD_POOL: mypool
       RBD_IMAGE_PREFIX: myimage
@@ -27,11 +27,11 @@ tasks:
     timeout: 30m
     clients:
       client.2:
-        - rbd/nvmeof_basic_tests.sh
-        - rbd/nvmeof_fio_test.sh --start_ns 1 --end_ns 30 --rbd_iostat
+        - nvmeof/basic_tests.sh
+        - nvmeof/fio_test.sh --start_ns 1 --end_ns 30 --rbd_iostat
       client.3:
-        - rbd/nvmeof_basic_tests.sh
-        - rbd/nvmeof_fio_test.sh --start_ns 31 --end_ns 60
+        - nvmeof/basic_tests.sh
+        - nvmeof/fio_test.sh --start_ns 31 --end_ns 60
     env:
       RBD_POOL: mypool
       IOSTAT_INTERVAL: '10'
index a98b5f7ccbf7a2fa569fee37e551ac8e5e8745e7..c87c3c2be1cac65d2d1d5e0e3abba4439df4a1aa 100644 (file)
@@ -17,7 +17,7 @@ tasks:
     no_coverage_and_limits: true
     clients:
       client.2:
-        - rbd/nvmeof_setup_subsystem.sh
+        - nvmeof/setup_subsystem.sh
     env:
       RBD_POOL: mypool
       RBD_IMAGE_PREFIX: myimage
@@ -27,11 +27,11 @@ tasks:
     timeout: 30m
     clients:
       client.2:
-        - rbd/nvmeof_basic_tests.sh
-        - rbd/nvmeof_fio_test.sh --rbd_iostat
+        - nvmeof/basic_tests.sh
+        - nvmeof/fio_test.sh --rbd_iostat
       client.3:
-        - rbd/nvmeof_basic_tests.sh
-        - rbd/nvmeof_namespace_test.sh
+        - nvmeof/basic_tests.sh
+        - nvmeof/namespace_test.sh
     env:
       RBD_POOL: mypool
       IOSTAT_INTERVAL: '10'
index 7d9c23e6ae5bc49c03dc24bc9b6668895545c515..4720821c21ed3a702aeaa49a8f556d3d481b1f10 100644 (file)
@@ -18,9 +18,9 @@ tasks:
     timeout: 30m
     clients:
       client.2:
-        - rbd/nvmeof_setup_subsystem.sh
-        - rbd/nvmeof_basic_tests.sh
-        - rbd/nvmeof_fio_test.sh --rbd_iostat
+        - nvmeof/setup_subsystem.sh
+        - nvmeof/basic_tests.sh
+        - nvmeof/fio_test.sh --rbd_iostat
     env:
       RBD_POOL: mypool
       RBD_IMAGE_PREFIX: myimage
@@ -32,8 +32,8 @@ tasks:
     timeout: 30m
     clients:
       client.2:
-        - rbd/nvmeof_scalability_test.sh nvmeof.a
-        - rbd/nvmeof_scalability_test.sh nvmeof.b 
+        - nvmeof/scalability_test.sh nvmeof.a
+        - nvmeof/scalability_test.sh nvmeof.b 
     env:
       SCALING_DELAYS: '50'
 
index 184858565a7641c42beda9cfb3f64eb5a616d499..d545f9733824f22d9f2165ad91e4b426a261ab1e 100644 (file)
@@ -17,8 +17,8 @@ tasks:
     no_coverage_and_limits: true
     clients:
       client.3:
-        - rbd/nvmeof_setup_subsystem.sh
-        - rbd/nvmeof_basic_tests.sh
+        - nvmeof/setup_subsystem.sh
+        - nvmeof/basic_tests.sh
     env:
       RBD_POOL: mypool
       RBD_IMAGE_PREFIX: myimage
index fa7153d2ed9645885530acf60643f291b3864321..46630a68281c45db99a3e08eed9ca27b6d54db81 100644 (file)
@@ -4,7 +4,7 @@ tasks:
     timeout: 30m
     clients:
       client.3:
-        - rbd/nvmeof_fio_test.sh --rbd_iostat
+        - nvmeof/fio_test.sh --rbd_iostat
     env:
       RBD_POOL: mypool
       IOSTAT_INTERVAL: '10'
diff --git a/qa/workunits/nvmeof/basic_tests.sh b/qa/workunits/nvmeof/basic_tests.sh
new file mode 100755 (executable)
index 0000000..dc6fd16
--- /dev/null
@@ -0,0 +1,82 @@
+#!/bin/bash -x
+
+sudo modprobe nvme-fabrics
+sudo modprobe nvme-tcp
+sudo dnf reinstall nvme-cli -y
+sudo lsmod | grep nvme
+nvme version
+
+source /etc/ceph/nvmeof.env
+SPDK_CONTROLLER="Ceph bdev Controller"
+DISCOVERY_PORT="8009"
+
+discovery() {
+    output=$(sudo nvme discover -t tcp -a $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $DISCOVERY_PORT)
+    expected_discovery_stdout="subtype: nvme subsystem"
+    if ! echo "$output" | grep -q "$expected_discovery_stdout"; then
+        return 1
+    fi
+}
+
+connect() {
+    sudo nvme connect -t tcp --traddr $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $NVMEOF_PORT -n "${NVMEOF_SUBSYSTEMS_PREFIX}1"
+    sleep 5
+    output=$(sudo nvme list --output-format=json)
+    if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
+        return 1
+    fi
+}
+
+disconnect_all() {
+    sudo nvme disconnect-all
+    output=$(sudo nvme list)
+    if echo "$output" | grep -q "$SPDK_CONTROLLER"; then
+        return 1
+    fi
+}
+
+connect_all() {
+    sudo nvme connect-all --traddr=$NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --transport=tcp -l 3600
+    sleep 5
+    output=$(sudo nvme list --output-format=json)
+    if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
+        return 1
+    fi
+}
+
+list_subsys() {
+    expected_count=$1
+    output=$(sudo nvme list-subsys --output-format=json)
+    multipath=$(echo $output | grep -o '"tcp"' | wc -l)
+    if [ "$multipath" -ne "$expected_count" ]; then
+        return 1
+    fi
+}
+
+
+test_run() {
+    echo "[nvmeof] Running test: $1"
+    $1 "${@:2}" # execute func
+    if [ $? -eq 0 ]; then
+        echo "[nvmeof] $1 test passed!"
+    else
+        echo "[nvmeof] $1 test failed!"
+        exit 1
+    fi
+}
+
+
+test_run disconnect_all
+test_run discovery 
+test_run connect
+test_run list_subsys 1
+test_run disconnect_all
+test_run list_subsys 0
+test_run connect_all
+gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
+multipath_count=$(( $gateways_count * $NVMEOF_SUBSYSTEMS_COUNT)) 
+test_run list_subsys $multipath_count
+
+
+echo "-------------Test Summary-------------"
+echo "[nvmeof] All nvmeof basic tests passed!"
diff --git a/qa/workunits/nvmeof/fio_test.sh b/qa/workunits/nvmeof/fio_test.sh
new file mode 100755 (executable)
index 0000000..57d355a
--- /dev/null
@@ -0,0 +1,77 @@
+#!/bin/bash -ex
+
+sudo yum -y install fio
+sudo yum -y install sysstat
+
+namespace_range_start=
+namespace_range_end=
+rbd_iostat=false
+
+while [[ $# -gt 0 ]]; do
+    case "$1" in
+        --start_ns)
+            namespace_range_start=$2
+            shift 2
+            ;;
+        --end_ns)
+            namespace_range_end=$2
+            shift 2
+            ;;
+        --rbd_iostat)
+            rbd_iostat=true
+            shift
+            ;;
+        *)
+            exit 100   # Internal error
+            ;;
+    esac
+done
+
+fio_file=$(mktemp -t nvmeof-fio-XXXX)
+all_drives_list=$(sudo nvme list --output-format=json | 
+    jq -r '.Devices | sort_by(.NameSpace) | .[] | select(.ModelNumber == "Ceph bdev Controller") | .DevicePath')
+
+# When the script is passed --start_ns and --end_ns (example: `nvmeof_fio_test.sh --start_ns 1 --end_ns 3`), 
+# then fio runs on namespaces only in the defined range (which is 1 to 3 here). 
+# So if `nvme list` has 5 namespaces with "Ceph bdev Controller", then fio will 
+# run on first 3 namespaces here.
+if [ "$namespace_range_start" ] || [ "$namespace_range_end" ]; then
+    selected_drives=$(echo "${all_drives_list[@]}" | sed -n "${namespace_range_start},${namespace_range_end}p")
+else
+    selected_drives="${all_drives_list[@]}"
+fi
+
+
+RUNTIME=${RUNTIME:-600}
+
+
+cat >> $fio_file <<EOF
+[nvmeof-fio-test]
+ioengine=${IO_ENGINE:-sync}
+bsrange=${BS_RANGE:-4k-64k}
+numjobs=${NUM_OF_JOBS:-1}
+size=${SIZE:-1G}
+time_based=1
+runtime=$RUNTIME
+rw=${RW:-randrw}
+filename=$(echo "$selected_drives" | tr '\n' ':' | sed 's/:$//')
+verify=md5
+verify_fatal=1
+direct=1
+EOF
+
+echo "[nvmeof.fio] starting fio test..."
+
+if [ -n "$IOSTAT_INTERVAL" ]; then
+    iostat_count=$(( RUNTIME / IOSTAT_INTERVAL ))
+    iostat -d -p $selected_drives $IOSTAT_INTERVAL $iostat_count -h &
+fi
+if [ "$rbd_iostat" = true  ]; then
+    iterations=$(( RUNTIME / 5 ))
+    timeout 20 rbd perf image iostat $RBD_POOL --iterations $iterations &
+fi
+fio --showcmd $fio_file
+sudo fio $fio_file 
+wait
+
+echo "[nvmeof.fio] fio test successful!"
diff --git a/qa/workunits/nvmeof/namespace_test.sh b/qa/workunits/nvmeof/namespace_test.sh
new file mode 100755 (executable)
index 0000000..ef331fd
--- /dev/null
@@ -0,0 +1,71 @@
+#!/bin/bash -xe
+
+# It's assumed in this test that each subsystem has equal number
+# of namespaces (i.e. NVMEOF_NAMESPACES_COUNT ns per subsystem). 
+# This script then adds NEW_NAMESPACES_COUNT amount of namespaces
+# to each subsystem and then deletes those new namespaces.
+
+source /etc/ceph/nvmeof.env
+
+RBD_POOL="${RBD_POOL:-mypool}"
+NEW_IMAGE_SIZE="${RBD_IMAGE_SIZE:-8192}" # 1024*8
+NEW_NAMESPACES_COUNT="${NEW_NAMESPACES_COUNT:-3}"
+
+gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
+new_images_count=$(( $NVMEOF_SUBSYSTEMS_COUNT * $NEW_NAMESPACES_COUNT)) 
+
+
+assert_namespaces_count() {
+    expected_count_per_subsys=$1
+    actual_count=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list | 
+        grep namespace_count | grep $expected_count_per_subsys | wc -l)
+    if [ "$actual_count" -ne "$NVMEOF_SUBSYSTEMS_COUNT" ]; then
+        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
+        echo "Expected count of namepaces not found, expected (per subsystem): $expected_count_per_subsys"
+        return 1
+    fi
+}
+
+
+# add rbd images
+for i in $(seq 1 $new_images_count); do
+    image_name="test${i}"
+    rbd create $RBD_POOL/$image_name --size $NEW_IMAGE_SIZE
+done
+
+# add new namespaces
+image_index=1
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+    for ns in $(seq 1 $NEW_NAMESPACES_COUNT); do
+        image="test${image_index}"
+        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
+        ((image_index++))
+    done
+done
+
+# list namespaces
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn        
+done
+
+# verify namespaces added
+expected_count_per_subsys=$(( $NEW_NAMESPACES_COUNT + $NVMEOF_NAMESPACES_COUNT ))
+assert_namespaces_count $expected_count_per_subsys
+
+# delete namespaces
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+    NSIDs=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json namespace list --subsystem $subsystem_nqn | 
+            jq -r '.namespaces[] | select(.rbd_image_name | startswith("test")) | .nsid')
+
+    for nsid in $NSIDs; do
+        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace del --subsystem $subsystem_nqn --nsid $nsid
+    done
+done
+
+# verify namespaces deleted
+expected_count_per_subsys=$NVMEOF_NAMESPACES_COUNT
+assert_namespaces_count $expected_count_per_subsys
+
diff --git a/qa/workunits/nvmeof/scalability_test.sh b/qa/workunits/nvmeof/scalability_test.sh
new file mode 100755 (executable)
index 0000000..5a26b62
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/bash -xe
+
+
+GATEWAYS=$1 # example "nvmeof.a,nvmeof.b"
+DELAY="${SCALING_DELAYS:-50}"
+
+if [ -z "$GATEWAYS" ]; then
+    echo "At least one gateway needs to be defined for scalability test"
+    exit 1
+fi
+
+pip3 install yq
+
+status_checks() {
+    ceph nvme-gw show mypool ''
+    ceph orch ls
+    ceph orch ps 
+    ceph -s
+}
+
+
+echo "[nvmeof.scale] Setting up config to remove gateways ${GATEWAYS}"
+ceph orch ls nvmeof --export > /tmp/nvmeof-gw.yaml
+cat /tmp/nvmeof-gw.yaml
+yq "del(.placement.hosts[] | select(. | test(\".*($(echo $GATEWAYS | sed 's/,/|/g'))\")))" /tmp/nvmeof-gw.yaml > /tmp/nvmeof-gw-new.yaml
+cat /tmp/nvmeof-gw-new.yaml
+
+echo "[nvmeof.scale] Starting scale testing by removing ${GATEWAYS}"
+status_checks
+ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
+ceph orch apply -i /tmp/nvmeof-gw-new.yaml # downscale
+sleep $DELAY
+status_checks
+ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
+ceph orch apply -i /tmp/nvmeof-gw.yaml #upscale
+sleep $DELAY
+status_checks
+
+echo "[nvmeof.scale] Scale testing passed for ${GATEWAYS}"
diff --git a/qa/workunits/nvmeof/setup_subsystem.sh b/qa/workunits/nvmeof/setup_subsystem.sh
new file mode 100755 (executable)
index 0000000..fb72e1d
--- /dev/null
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+set -ex
+
+
+source /etc/ceph/nvmeof.env
+
+# Set these in job yaml
+RBD_POOL="${RBD_POOL:-mypool}"
+RBD_IMAGE_PREFIX="${RBD_IMAGE_PREFIX:-myimage}"
+
+HOSTNAME=$(hostname)
+sudo podman images
+sudo podman ps
+sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
+
+IFS=',' read -ra gateway_ips <<< "$NVMEOF_GATEWAY_IP_ADDRESSES"
+IFS=',' read -ra gateway_names <<< "$NVMEOF_GATEWAY_NAMES"
+gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
+
+list_subsystems () { 
+   for i in "${!gateway_ips[@]}"
+    do
+        ip="${gateway_ips[i]}"
+        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT --format json subsystem list
+    done
+}
+
+# add all subsystems
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT subsystem add --subsystem $subsystem_nqn
+done
+
+list_subsystems
+
+# add all gateway listeners 
+for i in "${!gateway_ips[@]}"
+do
+    ip="${gateway_ips[i]}"
+    name="${gateway_names[i]}"
+    for j in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+        subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${j}"
+        echo "Adding gateway listener $index with IP ${ip} and name ${name}"
+        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT listener add --subsystem $subsystem_nqn --host-name $name --traddr $ip --trsvcid $NVMEOF_PORT
+    done
+done
+
+# add all hosts
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT host add --subsystem $subsystem_nqn --host "*"
+done
+
+# add all namespaces
+image_index=1
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+    for ns in $(seq 1 $NVMEOF_NAMESPACES_COUNT); do
+        image="${RBD_IMAGE_PREFIX}${image_index}"
+        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
+        ((image_index++))
+    done
+done
+
+list_subsystems
+
+# list namespaces
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn        
+done
+
+
+echo "[nvmeof] Subsystem setup done"
diff --git a/qa/workunits/rbd/nvmeof_basic_tests.sh b/qa/workunits/rbd/nvmeof_basic_tests.sh
deleted file mode 100755 (executable)
index dc6fd16..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash -x
-
-sudo modprobe nvme-fabrics
-sudo modprobe nvme-tcp
-sudo dnf reinstall nvme-cli -y
-sudo lsmod | grep nvme
-nvme version
-
-source /etc/ceph/nvmeof.env
-SPDK_CONTROLLER="Ceph bdev Controller"
-DISCOVERY_PORT="8009"
-
-discovery() {
-    output=$(sudo nvme discover -t tcp -a $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $DISCOVERY_PORT)
-    expected_discovery_stdout="subtype: nvme subsystem"
-    if ! echo "$output" | grep -q "$expected_discovery_stdout"; then
-        return 1
-    fi
-}
-
-connect() {
-    sudo nvme connect -t tcp --traddr $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $NVMEOF_PORT -n "${NVMEOF_SUBSYSTEMS_PREFIX}1"
-    sleep 5
-    output=$(sudo nvme list --output-format=json)
-    if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
-        return 1
-    fi
-}
-
-disconnect_all() {
-    sudo nvme disconnect-all
-    output=$(sudo nvme list)
-    if echo "$output" | grep -q "$SPDK_CONTROLLER"; then
-        return 1
-    fi
-}
-
-connect_all() {
-    sudo nvme connect-all --traddr=$NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --transport=tcp -l 3600
-    sleep 5
-    output=$(sudo nvme list --output-format=json)
-    if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
-        return 1
-    fi
-}
-
-list_subsys() {
-    expected_count=$1
-    output=$(sudo nvme list-subsys --output-format=json)
-    multipath=$(echo $output | grep -o '"tcp"' | wc -l)
-    if [ "$multipath" -ne "$expected_count" ]; then
-        return 1
-    fi
-}
-
-
-test_run() {
-    echo "[nvmeof] Running test: $1"
-    $1 "${@:2}" # execute func
-    if [ $? -eq 0 ]; then
-        echo "[nvmeof] $1 test passed!"
-    else
-        echo "[nvmeof] $1 test failed!"
-        exit 1
-    fi
-}
-
-
-test_run disconnect_all
-test_run discovery 
-test_run connect
-test_run list_subsys 1
-test_run disconnect_all
-test_run list_subsys 0
-test_run connect_all
-gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
-multipath_count=$(( $gateways_count * $NVMEOF_SUBSYSTEMS_COUNT)) 
-test_run list_subsys $multipath_count
-
-
-echo "-------------Test Summary-------------"
-echo "[nvmeof] All nvmeof basic tests passed!"
diff --git a/qa/workunits/rbd/nvmeof_fio_test.sh b/qa/workunits/rbd/nvmeof_fio_test.sh
deleted file mode 100755 (executable)
index 57d355a..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash -ex
-
-sudo yum -y install fio
-sudo yum -y install sysstat
-
-namespace_range_start=
-namespace_range_end=
-rbd_iostat=false
-
-while [[ $# -gt 0 ]]; do
-    case "$1" in
-        --start_ns)
-            namespace_range_start=$2
-            shift 2
-            ;;
-        --end_ns)
-            namespace_range_end=$2
-            shift 2
-            ;;
-        --rbd_iostat)
-            rbd_iostat=true
-            shift
-            ;;
-        *)
-            exit 100   # Internal error
-            ;;
-    esac
-done
-
-fio_file=$(mktemp -t nvmeof-fio-XXXX)
-all_drives_list=$(sudo nvme list --output-format=json | 
-    jq -r '.Devices | sort_by(.NameSpace) | .[] | select(.ModelNumber == "Ceph bdev Controller") | .DevicePath')
-
-# When the script is passed --start_ns and --end_ns (example: `nvmeof_fio_test.sh --start_ns 1 --end_ns 3`), 
-# then fio runs on namespaces only in the defined range (which is 1 to 3 here). 
-# So if `nvme list` has 5 namespaces with "Ceph bdev Controller", then fio will 
-# run on first 3 namespaces here.
-if [ "$namespace_range_start" ] || [ "$namespace_range_end" ]; then
-    selected_drives=$(echo "${all_drives_list[@]}" | sed -n "${namespace_range_start},${namespace_range_end}p")
-else
-    selected_drives="${all_drives_list[@]}"
-fi
-
-
-RUNTIME=${RUNTIME:-600}
-
-
-cat >> $fio_file <<EOF
-[nvmeof-fio-test]
-ioengine=${IO_ENGINE:-sync}
-bsrange=${BS_RANGE:-4k-64k}
-numjobs=${NUM_OF_JOBS:-1}
-size=${SIZE:-1G}
-time_based=1
-runtime=$RUNTIME
-rw=${RW:-randrw}
-filename=$(echo "$selected_drives" | tr '\n' ':' | sed 's/:$//')
-verify=md5
-verify_fatal=1
-direct=1
-EOF
-
-echo "[nvmeof.fio] starting fio test..."
-
-if [ -n "$IOSTAT_INTERVAL" ]; then
-    iostat_count=$(( RUNTIME / IOSTAT_INTERVAL ))
-    iostat -d -p $selected_drives $IOSTAT_INTERVAL $iostat_count -h &
-fi
-if [ "$rbd_iostat" = true  ]; then
-    iterations=$(( RUNTIME / 5 ))
-    timeout 20 rbd perf image iostat $RBD_POOL --iterations $iterations &
-fi
-fio --showcmd $fio_file
-sudo fio $fio_file 
-wait
-
-echo "[nvmeof.fio] fio test successful!"
diff --git a/qa/workunits/rbd/nvmeof_namespace_test.sh b/qa/workunits/rbd/nvmeof_namespace_test.sh
deleted file mode 100755 (executable)
index ef331fd..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/bash -xe
-
-# It's assumed in this test that each subsystem has equal number
-# of namespaces (i.e. NVMEOF_NAMESPACES_COUNT ns per subsystem). 
-# This script then adds NEW_NAMESPACES_COUNT amount of namespaces
-# to each subsystem and then deletes those new namespaces.
-
-source /etc/ceph/nvmeof.env
-
-RBD_POOL="${RBD_POOL:-mypool}"
-NEW_IMAGE_SIZE="${RBD_IMAGE_SIZE:-8192}" # 1024*8
-NEW_NAMESPACES_COUNT="${NEW_NAMESPACES_COUNT:-3}"
-
-gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
-new_images_count=$(( $NVMEOF_SUBSYSTEMS_COUNT * $NEW_NAMESPACES_COUNT)) 
-
-
-assert_namespaces_count() {
-    expected_count_per_subsys=$1
-    actual_count=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list | 
-        grep namespace_count | grep $expected_count_per_subsys | wc -l)
-    if [ "$actual_count" -ne "$NVMEOF_SUBSYSTEMS_COUNT" ]; then
-        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
-        echo "Expected count of namepaces not found, expected (per subsystem): $expected_count_per_subsys"
-        return 1
-    fi
-}
-
-
-# add rbd images
-for i in $(seq 1 $new_images_count); do
-    image_name="test${i}"
-    rbd create $RBD_POOL/$image_name --size $NEW_IMAGE_SIZE
-done
-
-# add new namespaces
-image_index=1
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
-    for ns in $(seq 1 $NEW_NAMESPACES_COUNT); do
-        image="test${image_index}"
-        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
-        ((image_index++))
-    done
-done
-
-# list namespaces
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
-    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn        
-done
-
-# verify namespaces added
-expected_count_per_subsys=$(( $NEW_NAMESPACES_COUNT + $NVMEOF_NAMESPACES_COUNT ))
-assert_namespaces_count $expected_count_per_subsys
-
-# delete namespaces
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
-    NSIDs=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json namespace list --subsystem $subsystem_nqn | 
-            jq -r '.namespaces[] | select(.rbd_image_name | startswith("test")) | .nsid')
-
-    for nsid in $NSIDs; do
-        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace del --subsystem $subsystem_nqn --nsid $nsid
-    done
-done
-
-# verify namespaces deleted
-expected_count_per_subsys=$NVMEOF_NAMESPACES_COUNT
-assert_namespaces_count $expected_count_per_subsys
-
diff --git a/qa/workunits/rbd/nvmeof_scalability_test.sh b/qa/workunits/rbd/nvmeof_scalability_test.sh
deleted file mode 100755 (executable)
index 5a26b62..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash -xe
-
-
-GATEWAYS=$1 # example "nvmeof.a,nvmeof.b"
-DELAY="${SCALING_DELAYS:-50}"
-
-if [ -z "$GATEWAYS" ]; then
-    echo "At least one gateway needs to be defined for scalability test"
-    exit 1
-fi
-
-pip3 install yq
-
-status_checks() {
-    ceph nvme-gw show mypool ''
-    ceph orch ls
-    ceph orch ps 
-    ceph -s
-}
-
-
-echo "[nvmeof.scale] Setting up config to remove gateways ${GATEWAYS}"
-ceph orch ls nvmeof --export > /tmp/nvmeof-gw.yaml
-cat /tmp/nvmeof-gw.yaml
-yq "del(.placement.hosts[] | select(. | test(\".*($(echo $GATEWAYS | sed 's/,/|/g'))\")))" /tmp/nvmeof-gw.yaml > /tmp/nvmeof-gw-new.yaml
-cat /tmp/nvmeof-gw-new.yaml
-
-echo "[nvmeof.scale] Starting scale testing by removing ${GATEWAYS}"
-status_checks
-ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
-ceph orch apply -i /tmp/nvmeof-gw-new.yaml # downscale
-sleep $DELAY
-status_checks
-ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
-ceph orch apply -i /tmp/nvmeof-gw.yaml #upscale
-sleep $DELAY
-status_checks
-
-echo "[nvmeof.scale] Scale testing passed for ${GATEWAYS}"
diff --git a/qa/workunits/rbd/nvmeof_setup_subsystem.sh b/qa/workunits/rbd/nvmeof_setup_subsystem.sh
deleted file mode 100755 (executable)
index fb72e1d..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-
-source /etc/ceph/nvmeof.env
-
-# Set these in job yaml
-RBD_POOL="${RBD_POOL:-mypool}"
-RBD_IMAGE_PREFIX="${RBD_IMAGE_PREFIX:-myimage}"
-
-HOSTNAME=$(hostname)
-sudo podman images
-sudo podman ps
-sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
-
-IFS=',' read -ra gateway_ips <<< "$NVMEOF_GATEWAY_IP_ADDRESSES"
-IFS=',' read -ra gateway_names <<< "$NVMEOF_GATEWAY_NAMES"
-gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
-
-list_subsystems () { 
-   for i in "${!gateway_ips[@]}"
-    do
-        ip="${gateway_ips[i]}"
-        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT --format json subsystem list
-    done
-}
-
-# add all subsystems
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
-    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT subsystem add --subsystem $subsystem_nqn
-done
-
-list_subsystems
-
-# add all gateway listeners 
-for i in "${!gateway_ips[@]}"
-do
-    ip="${gateway_ips[i]}"
-    name="${gateway_names[i]}"
-    for j in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-        subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${j}"
-        echo "Adding gateway listener $index with IP ${ip} and name ${name}"
-        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT listener add --subsystem $subsystem_nqn --host-name $name --traddr $ip --trsvcid $NVMEOF_PORT
-    done
-done
-
-# add all hosts
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
-    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT host add --subsystem $subsystem_nqn --host "*"
-done
-
-# add all namespaces
-image_index=1
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
-    for ns in $(seq 1 $NVMEOF_NAMESPACES_COUNT); do
-        image="${RBD_IMAGE_PREFIX}${image_index}"
-        sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
-        ((image_index++))
-    done
-done
-
-list_subsystems
-
-# list namespaces
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
-    subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
-    sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn        
-done
-
-
-echo "[nvmeof] Subsystem setup done"