no_coverage_and_limits: true
clients:
client.2:
- - rbd/nvmeof_setup_subsystem.sh
+ - nvmeof/setup_subsystem.sh
env:
RBD_POOL: mypool
RBD_IMAGE_PREFIX: myimage
timeout: 30m
clients:
client.2:
- - rbd/nvmeof_basic_tests.sh
- - rbd/nvmeof_fio_test.sh --start_ns 1 --end_ns 30 --rbd_iostat
+ - nvmeof/basic_tests.sh
+ - nvmeof/fio_test.sh --start_ns 1 --end_ns 30 --rbd_iostat
client.3:
- - rbd/nvmeof_basic_tests.sh
- - rbd/nvmeof_fio_test.sh --start_ns 31 --end_ns 60
+ - nvmeof/basic_tests.sh
+ - nvmeof/fio_test.sh --start_ns 31 --end_ns 60
env:
RBD_POOL: mypool
IOSTAT_INTERVAL: '10'
no_coverage_and_limits: true
clients:
client.2:
- - rbd/nvmeof_setup_subsystem.sh
+ - nvmeof/setup_subsystem.sh
env:
RBD_POOL: mypool
RBD_IMAGE_PREFIX: myimage
timeout: 30m
clients:
client.2:
- - rbd/nvmeof_basic_tests.sh
- - rbd/nvmeof_fio_test.sh --rbd_iostat
+ - nvmeof/basic_tests.sh
+ - nvmeof/fio_test.sh --rbd_iostat
client.3:
- - rbd/nvmeof_basic_tests.sh
- - rbd/nvmeof_namespace_test.sh
+ - nvmeof/basic_tests.sh
+ - nvmeof/namespace_test.sh
env:
RBD_POOL: mypool
IOSTAT_INTERVAL: '10'
timeout: 30m
clients:
client.2:
- - rbd/nvmeof_setup_subsystem.sh
- - rbd/nvmeof_basic_tests.sh
- - rbd/nvmeof_fio_test.sh --rbd_iostat
+ - nvmeof/setup_subsystem.sh
+ - nvmeof/basic_tests.sh
+ - nvmeof/fio_test.sh --rbd_iostat
env:
RBD_POOL: mypool
RBD_IMAGE_PREFIX: myimage
timeout: 30m
clients:
client.2:
- - rbd/nvmeof_scalability_test.sh nvmeof.a
- - rbd/nvmeof_scalability_test.sh nvmeof.b
+ - nvmeof/scalability_test.sh nvmeof.a
+ - nvmeof/scalability_test.sh nvmeof.b
env:
SCALING_DELAYS: '50'
no_coverage_and_limits: true
clients:
client.3:
- - rbd/nvmeof_setup_subsystem.sh
- - rbd/nvmeof_basic_tests.sh
+ - nvmeof/setup_subsystem.sh
+ - nvmeof/basic_tests.sh
env:
RBD_POOL: mypool
RBD_IMAGE_PREFIX: myimage
timeout: 30m
clients:
client.3:
- - rbd/nvmeof_fio_test.sh --rbd_iostat
+ - nvmeof/fio_test.sh --rbd_iostat
env:
RBD_POOL: mypool
IOSTAT_INTERVAL: '10'
--- /dev/null
+#!/bin/bash -x
+
+sudo modprobe nvme-fabrics
+sudo modprobe nvme-tcp
+sudo dnf reinstall nvme-cli -y
+sudo lsmod | grep nvme
+nvme version
+
+source /etc/ceph/nvmeof.env
+SPDK_CONTROLLER="Ceph bdev Controller"
+DISCOVERY_PORT="8009"
+
+discovery() {
+ output=$(sudo nvme discover -t tcp -a $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $DISCOVERY_PORT)
+ expected_discovery_stdout="subtype: nvme subsystem"
+ if ! echo "$output" | grep -q "$expected_discovery_stdout"; then
+ return 1
+ fi
+}
+
+connect() {
+ sudo nvme connect -t tcp --traddr $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $NVMEOF_PORT -n "${NVMEOF_SUBSYSTEMS_PREFIX}1"
+ sleep 5
+ output=$(sudo nvme list --output-format=json)
+ if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
+ return 1
+ fi
+}
+
+disconnect_all() {
+ sudo nvme disconnect-all
+ output=$(sudo nvme list)
+ if echo "$output" | grep -q "$SPDK_CONTROLLER"; then
+ return 1
+ fi
+}
+
+connect_all() {
+ sudo nvme connect-all --traddr=$NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --transport=tcp -l 3600
+ sleep 5
+ output=$(sudo nvme list --output-format=json)
+ if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
+ return 1
+ fi
+}
+
+list_subsys() {
+ expected_count=$1
+ output=$(sudo nvme list-subsys --output-format=json)
+ multipath=$(echo $output | grep -o '"tcp"' | wc -l)
+ if [ "$multipath" -ne "$expected_count" ]; then
+ return 1
+ fi
+}
+
+
+test_run() {
+ echo "[nvmeof] Running test: $1"
+ $1 "${@:2}" # execute func
+ if [ $? -eq 0 ]; then
+ echo "[nvmeof] $1 test passed!"
+ else
+ echo "[nvmeof] $1 test failed!"
+ exit 1
+ fi
+}
+
+
+test_run disconnect_all
+test_run discovery
+test_run connect
+test_run list_subsys 1
+test_run disconnect_all
+test_run list_subsys 0
+test_run connect_all
+gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
+multipath_count=$(( $gateways_count * $NVMEOF_SUBSYSTEMS_COUNT))
+test_run list_subsys $multipath_count
+
+
+echo "-------------Test Summary-------------"
+echo "[nvmeof] All nvmeof basic tests passed!"
--- /dev/null
+#!/bin/bash -ex
+
+sudo yum -y install fio
+sudo yum -y install sysstat
+
+namespace_range_start=
+namespace_range_end=
+rbd_iostat=false
+
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --start_ns)
+ namespace_range_start=$2
+ shift 2
+ ;;
+ --end_ns)
+ namespace_range_end=$2
+ shift 2
+ ;;
+ --rbd_iostat)
+ rbd_iostat=true
+ shift
+ ;;
+ *)
+ exit 100 # Internal error
+ ;;
+ esac
+done
+
+fio_file=$(mktemp -t nvmeof-fio-XXXX)
+all_drives_list=$(sudo nvme list --output-format=json |
+ jq -r '.Devices | sort_by(.NameSpace) | .[] | select(.ModelNumber == "Ceph bdev Controller") | .DevicePath')
+
+# When the script is passed --start_ns and --end_ns (example: `nvmeof_fio_test.sh --start_ns 1 --end_ns 3`),
+# then fio runs on namespaces only in the defined range (which is 1 to 3 here).
+# So if `nvme list` has 5 namespaces with "SPDK Controller", then fio will
+# run on first 3 namespaces here.
+if [ "$namespace_range_start" ] || [ "$namespace_range_end" ]; then
+ selected_drives=$(echo "${all_drives_list[@]}" | sed -n "${namespace_range_start},${namespace_range_end}p")
+else
+ selected_drives="${all_drives_list[@]}"
+fi
+
+
+RUNTIME=${RUNTIME:-600}
+
+
+cat >> $fio_file <<EOF
+[nvmeof-fio-test]
+ioengine=${IO_ENGINE:-sync}
+bsrange=${BS_RANGE:-4k-64k}
+numjobs=${NUM_OF_JOBS:-1}
+size=${SIZE:-1G}
+time_based=1
+runtime=$RUNTIME
+rw=${RW:-randrw}
+filename=$(echo "$selected_drives" | tr '\n' ':' | sed 's/:$//')
+verify=md5
+verify_fatal=1
+direct=1
+EOF
+
+echo "[nvmeof.fio] starting fio test..."
+
+if [ -n "$IOSTAT_INTERVAL" ]; then
+ iostat_count=$(( RUNTIME / IOSTAT_INTERVAL ))
+ iostat -d -p $selected_drives $IOSTAT_INTERVAL $iostat_count -h &
+fi
+if [ "$rbd_iostat" = true ]; then
+ iterations=$(( RUNTIME / 5 ))
+ timeout 20 rbd perf image iostat $RBD_POOL --iterations $iterations &
+fi
+fio --showcmd $fio_file
+sudo fio $fio_file
+wait
+
+echo "[nvmeof.fio] fio test successful!"
--- /dev/null
+#!/bin/bash -xe
+
+# It's assumed in this test that each subsystem has equal number
+# of namespaces (i.e. NVMEOF_NAMESPACES_COUNT ns per subsystem).
+# This script then adds NEW_NAMESPACES_COUNT amount of namespaces
+# to each subsystem and then deletes those new namespaces.
+
+source /etc/ceph/nvmeof.env
+
+RBD_POOL="${RBD_POOL:-mypool}"
+NEW_IMAGE_SIZE="${RBD_IMAGE_SIZE:-8192}" # 1024*8
+NEW_NAMESPACES_COUNT="${NEW_NAMESPACES_COUNT:-3}"
+
+gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
+new_images_count=$(( $NVMEOF_SUBSYSTEMS_COUNT * $NEW_NAMESPACES_COUNT))
+
+
+assert_namespaces_count() {
+ expected_count_per_subsys=$1
+ actual_count=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list |
+ grep namespace_count | grep $expected_count_per_subsys | wc -l)
+ if [ "$actual_count" -ne "$NVMEOF_SUBSYSTEMS_COUNT" ]; then
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
+ echo "Expected count of namepaces not found, expected (per subsystem): $expected_count_per_subsys"
+ return 1
+ fi
+}
+
+
+# add rbd images
+for i in $(seq 1 $new_images_count); do
+ image_name="test${i}"
+ rbd create $RBD_POOL/$image_name --size $NEW_IMAGE_SIZE
+done
+
+# add new namespaces
+image_index=1
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ for ns in $(seq 1 $NEW_NAMESPACES_COUNT); do
+ image="test${image_index}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
+ ((image_index++))
+ done
+done
+
+# list namespaces
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn
+done
+
+# verify namespaces added
+expected_count_per_subsys=$(( $NEW_NAMESPACES_COUNT + $NVMEOF_NAMESPACES_COUNT ))
+assert_namespaces_count $expected_count_per_subsys
+
+# delete namespaces
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ NSIDs=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json namespace list --subsystem $subsystem_nqn |
+ jq -r '.namespaces[] | select(.rbd_image_name | startswith("test")) | .nsid')
+
+ for nsid in $NSIDs; do
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace del --subsystem $subsystem_nqn --nsid $nsid
+ done
+done
+
+# verify namespaces deleted
+expected_count_per_subsys=$NVMEOF_NAMESPACES_COUNT
+assert_namespaces_count $expected_count_per_subsys
+
--- /dev/null
+#!/bin/bash -xe
+
+
+GATEWAYS=$1 # example "nvmeof.a,nvmeof.b"
+DELAY="${SCALING_DELAYS:-50}"
+
+if [ -z "$GATEWAYS" ]; then
+ echo "At least one gateway needs to be defined for scalability test"
+ exit 1
+fi
+
+pip3 install yq
+
+status_checks() {
+ ceph nvme-gw show mypool ''
+ ceph orch ls
+ ceph orch ps
+ ceph -s
+}
+
+
+echo "[nvmeof.scale] Setting up config to remove gateways ${GATEWAYS}"
+ceph orch ls nvmeof --export > /tmp/nvmeof-gw.yaml
+cat /tmp/nvmeof-gw.yaml
+yq "del(.placement.hosts[] | select(. | test(\".*($(echo $GATEWAYS | sed 's/,/|/g'))\")))" /tmp/nvmeof-gw.yaml > /tmp/nvmeof-gw-new.yaml
+cat /tmp/nvmeof-gw-new.yaml
+
+echo "[nvmeof.scale] Starting scale testing by removing ${GATEWAYS}"
+status_checks
+ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
+ceph orch apply -i /tmp/nvmeof-gw-new.yaml # downscale
+sleep $DELAY
+status_checks
+ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
+ceph orch apply -i /tmp/nvmeof-gw.yaml #upscale
+sleep $DELAY
+status_checks
+
+echo "[nvmeof.scale] Scale testing passed for ${GATEWAYS}"
--- /dev/null
+#!/bin/bash
+
+set -ex
+
+
+source /etc/ceph/nvmeof.env
+
+# Set these in job yaml
+RBD_POOL="${RBD_POOL:-mypool}"
+RBD_IMAGE_PREFIX="${RBD_IMAGE_PREFIX:-myimage}"
+
+HOSTNAME=$(hostname)
+sudo podman images
+sudo podman ps
+sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
+
+IFS=',' read -ra gateway_ips <<< "$NVMEOF_GATEWAY_IP_ADDRESSES"
+IFS=',' read -ra gateway_names <<< "$NVMEOF_GATEWAY_NAMES"
+gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
+
+list_subsystems () {
+ for i in "${!gateway_ips[@]}"
+ do
+ ip="${gateway_ips[i]}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT --format json subsystem list
+ done
+}
+
+# add all subsystems
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT subsystem add --subsystem $subsystem_nqn
+done
+
+list_subsystems
+
+# add all gateway listeners
+for i in "${!gateway_ips[@]}"
+do
+ ip="${gateway_ips[i]}"
+ name="${gateway_names[i]}"
+ for j in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${j}"
+ echo "Adding gateway listener $index with IP ${ip} and name ${name}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT listener add --subsystem $subsystem_nqn --host-name $name --traddr $ip --trsvcid $NVMEOF_PORT
+ done
+done
+
+# add all hosts
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT host add --subsystem $subsystem_nqn --host "*"
+done
+
+# add all namespaces
+image_index=1
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ for ns in $(seq 1 $NVMEOF_NAMESPACES_COUNT); do
+ image="${RBD_IMAGE_PREFIX}${image_index}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
+ ((image_index++))
+ done
+done
+
+list_subsystems
+
+# list namespaces
+for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
+ subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
+ sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn
+done
+
+
+echo "[nvmeof] Subsystem setup done"
+++ /dev/null
-#!/bin/bash -x
-
-sudo modprobe nvme-fabrics
-sudo modprobe nvme-tcp
-sudo dnf reinstall nvme-cli -y
-sudo lsmod | grep nvme
-nvme version
-
-source /etc/ceph/nvmeof.env
-SPDK_CONTROLLER="Ceph bdev Controller"
-DISCOVERY_PORT="8009"
-
-discovery() {
- output=$(sudo nvme discover -t tcp -a $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $DISCOVERY_PORT)
- expected_discovery_stdout="subtype: nvme subsystem"
- if ! echo "$output" | grep -q "$expected_discovery_stdout"; then
- return 1
- fi
-}
-
-connect() {
- sudo nvme connect -t tcp --traddr $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS -s $NVMEOF_PORT -n "${NVMEOF_SUBSYSTEMS_PREFIX}1"
- sleep 5
- output=$(sudo nvme list --output-format=json)
- if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
- return 1
- fi
-}
-
-disconnect_all() {
- sudo nvme disconnect-all
- output=$(sudo nvme list)
- if echo "$output" | grep -q "$SPDK_CONTROLLER"; then
- return 1
- fi
-}
-
-connect_all() {
- sudo nvme connect-all --traddr=$NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --transport=tcp -l 3600
- sleep 5
- output=$(sudo nvme list --output-format=json)
- if ! echo "$output" | grep -q "$SPDK_CONTROLLER"; then
- return 1
- fi
-}
-
-list_subsys() {
- expected_count=$1
- output=$(sudo nvme list-subsys --output-format=json)
- multipath=$(echo $output | grep -o '"tcp"' | wc -l)
- if [ "$multipath" -ne "$expected_count" ]; then
- return 1
- fi
-}
-
-
-test_run() {
- echo "[nvmeof] Running test: $1"
- $1 "${@:2}" # execute func
- if [ $? -eq 0 ]; then
- echo "[nvmeof] $1 test passed!"
- else
- echo "[nvmeof] $1 test failed!"
- exit 1
- fi
-}
-
-
-test_run disconnect_all
-test_run discovery
-test_run connect
-test_run list_subsys 1
-test_run disconnect_all
-test_run list_subsys 0
-test_run connect_all
-gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
-multipath_count=$(( $gateways_count * $NVMEOF_SUBSYSTEMS_COUNT))
-test_run list_subsys $multipath_count
-
-
-echo "-------------Test Summary-------------"
-echo "[nvmeof] All nvmeof basic tests passed!"
+++ /dev/null
-#!/bin/bash -ex
-
-sudo yum -y install fio
-sudo yum -y install sysstat
-
-namespace_range_start=
-namespace_range_end=
-rbd_iostat=false
-
-while [[ $# -gt 0 ]]; do
- case "$1" in
- --start_ns)
- namespace_range_start=$2
- shift 2
- ;;
- --end_ns)
- namespace_range_end=$2
- shift 2
- ;;
- --rbd_iostat)
- rbd_iostat=true
- shift
- ;;
- *)
- exit 100 # Internal error
- ;;
- esac
-done
-
-fio_file=$(mktemp -t nvmeof-fio-XXXX)
-all_drives_list=$(sudo nvme list --output-format=json |
- jq -r '.Devices | sort_by(.NameSpace) | .[] | select(.ModelNumber == "Ceph bdev Controller") | .DevicePath')
-
-# When the script is passed --start_ns and --end_ns (example: `nvmeof_fio_test.sh --start_ns 1 --end_ns 3`),
-# then fio runs on namespaces only in the defined range (which is 1 to 3 here).
-# So if `nvme list` has 5 namespaces with "SPDK Controller", then fio will
-# run on first 3 namespaces here.
-if [ "$namespace_range_start" ] || [ "$namespace_range_end" ]; then
- selected_drives=$(echo "${all_drives_list[@]}" | sed -n "${namespace_range_start},${namespace_range_end}p")
-else
- selected_drives="${all_drives_list[@]}"
-fi
-
-
-RUNTIME=${RUNTIME:-600}
-
-
-cat >> $fio_file <<EOF
-[nvmeof-fio-test]
-ioengine=${IO_ENGINE:-sync}
-bsrange=${BS_RANGE:-4k-64k}
-numjobs=${NUM_OF_JOBS:-1}
-size=${SIZE:-1G}
-time_based=1
-runtime=$RUNTIME
-rw=${RW:-randrw}
-filename=$(echo "$selected_drives" | tr '\n' ':' | sed 's/:$//')
-verify=md5
-verify_fatal=1
-direct=1
-EOF
-
-echo "[nvmeof.fio] starting fio test..."
-
-if [ -n "$IOSTAT_INTERVAL" ]; then
- iostat_count=$(( RUNTIME / IOSTAT_INTERVAL ))
- iostat -d -p $selected_drives $IOSTAT_INTERVAL $iostat_count -h &
-fi
-if [ "$rbd_iostat" = true ]; then
- iterations=$(( RUNTIME / 5 ))
- timeout 20 rbd perf image iostat $RBD_POOL --iterations $iterations &
-fi
-fio --showcmd $fio_file
-sudo fio $fio_file
-wait
-
-echo "[nvmeof.fio] fio test successful!"
+++ /dev/null
-#!/bin/bash -xe
-
-# It's assumed in this test that each subsystem has equal number
-# of namespaces (i.e. NVMEOF_NAMESPACES_COUNT ns per subsystem).
-# This script then adds NEW_NAMESPACES_COUNT amount of namespaces
-# to each subsystem and then deletes those new namespaces.
-
-source /etc/ceph/nvmeof.env
-
-RBD_POOL="${RBD_POOL:-mypool}"
-NEW_IMAGE_SIZE="${RBD_IMAGE_SIZE:-8192}" # 1024*8
-NEW_NAMESPACES_COUNT="${NEW_NAMESPACES_COUNT:-3}"
-
-gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
-new_images_count=$(( $NVMEOF_SUBSYSTEMS_COUNT * $NEW_NAMESPACES_COUNT))
-
-
-assert_namespaces_count() {
- expected_count_per_subsys=$1
- actual_count=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list |
- grep namespace_count | grep $expected_count_per_subsys | wc -l)
- if [ "$actual_count" -ne "$NVMEOF_SUBSYSTEMS_COUNT" ]; then
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
- echo "Expected count of namepaces not found, expected (per subsystem): $expected_count_per_subsys"
- return 1
- fi
-}
-
-
-# add rbd images
-for i in $(seq 1 $new_images_count); do
- image_name="test${i}"
- rbd create $RBD_POOL/$image_name --size $NEW_IMAGE_SIZE
-done
-
-# add new namespaces
-image_index=1
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- for ns in $(seq 1 $NEW_NAMESPACES_COUNT); do
- image="test${image_index}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
- ((image_index++))
- done
-done
-
-# list namespaces
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn
-done
-
-# verify namespaces added
-expected_count_per_subsys=$(( $NEW_NAMESPACES_COUNT + $NVMEOF_NAMESPACES_COUNT ))
-assert_namespaces_count $expected_count_per_subsys
-
-# delete namespaces
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- NSIDs=$(sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json namespace list --subsystem $subsystem_nqn |
- jq -r '.namespaces[] | select(.rbd_image_name | startswith("test")) | .nsid')
-
- for nsid in $NSIDs; do
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace del --subsystem $subsystem_nqn --nsid $nsid
- done
-done
-
-# verify namespaces deleted
-expected_count_per_subsys=$NVMEOF_NAMESPACES_COUNT
-assert_namespaces_count $expected_count_per_subsys
-
+++ /dev/null
-#!/bin/bash -xe
-
-
-GATEWAYS=$1 # exmaple "nvmeof.a,nvmeof.b"
-DELAY="${SCALING_DELAYS:-50}"
-
-if [ -z "$GATEWAYS" ]; then
- echo "At least one gateway needs to be defined for scalability test"
- exit 1
-fi
-
-pip3 install yq
-
-status_checks() {
- ceph nvme-gw show mypool ''
- ceph orch ls
- ceph orch ps
- ceph -s
-}
-
-
-echo "[nvmeof.scale] Setting up config to remove gateways ${GATEWAYS}"
-ceph orch ls nvmeof --export > /tmp/nvmeof-gw.yaml
-cat /tmp/nvmeof-gw.yaml
-yq "del(.placement.hosts[] | select(. | test(\".*($(echo $GATEWAYS | sed 's/,/|/g'))\")))" /tmp/nvmeof-gw.yaml > /tmp/nvmeof-gw-new.yaml
-cat /tmp/nvmeof-gw-new.yaml
-
-echo "[nvmeof.scale] Starting scale testing by removing ${GATEWAYS}"
-status_checks
-ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
-ceph orch apply -i /tmp/nvmeof-gw-new.yaml # downscale
-sleep $DELAY
-status_checks
-ceph orch rm nvmeof.mypool && sleep 20 # temp workaround
-ceph orch apply -i /tmp/nvmeof-gw.yaml #upscale
-sleep $DELAY
-status_checks
-
-echo "[nvmeof.scale] Scale testing passed for ${GATEWAYS}"
+++ /dev/null
-#!/bin/bash
-
-set -ex
-
-
-source /etc/ceph/nvmeof.env
-
-# Set these in job yaml
-RBD_POOL="${RBD_POOL:-mypool}"
-RBD_IMAGE_PREFIX="${RBD_IMAGE_PREFIX:-myimage}"
-
-HOSTNAME=$(hostname)
-sudo podman images
-sudo podman ps
-sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format json subsystem list
-
-IFS=',' read -ra gateway_ips <<< "$NVMEOF_GATEWAY_IP_ADDRESSES"
-IFS=',' read -ra gateway_names <<< "$NVMEOF_GATEWAY_NAMES"
-gateways_count=$(( $(echo "$NVMEOF_GATEWAY_IP_ADDRESSES" | tr -cd ',' | wc -c) + 1 ))
-
-list_subsystems () {
- for i in "${!gateway_ips[@]}"
- do
- ip="${gateway_ips[i]}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT --format json subsystem list
- done
-}
-
-# add all subsystems
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT subsystem add --subsystem $subsystem_nqn
-done
-
-list_subsystems
-
-# add all gateway listeners
-for i in "${!gateway_ips[@]}"
-do
- ip="${gateway_ips[i]}"
- name="${gateway_names[i]}"
- for j in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${j}"
- echo "Adding gateway listener $index with IP ${ip} and name ${name}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $ip --server-port $NVMEOF_SRPORT listener add --subsystem $subsystem_nqn --host-name $name --traddr $ip --trsvcid $NVMEOF_PORT
- done
-done
-
-# add all hosts
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT host add --subsystem $subsystem_nqn --host "*"
-done
-
-# add all namespaces
-image_index=1
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- for ns in $(seq 1 $NVMEOF_NAMESPACES_COUNT); do
- image="${RBD_IMAGE_PREFIX}${image_index}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT namespace add --subsystem $subsystem_nqn --rbd-pool $RBD_POOL --rbd-image $image --load-balancing-group $(($image_index % $gateways_count + 1))
- ((image_index++))
- done
-done
-
-list_subsystems
-
-# list namespaces
-for i in $(seq 1 $NVMEOF_SUBSYSTEMS_COUNT); do
- subsystem_nqn="${NVMEOF_SUBSYSTEMS_PREFIX}${i}"
- sudo podman run -it $NVMEOF_CLI_IMAGE --server-address $NVMEOF_DEFAULT_GATEWAY_IP_ADDRESS --server-port $NVMEOF_SRPORT --format plain namespace list --subsystem $subsystem_nqn
-done
-
-
-echo "[nvmeof] Subsystem setup done"