From 42e121a42a51e09f456d2e4e303cae0f114637c2 Mon Sep 17 00:00:00 2001 From: Vallari Agrawal Date: Thu, 26 Oct 2023 13:25:44 +0530 Subject: [PATCH] qa: add rbd/nvmeof test A basic test for ceph-nvmeof[1] where nvmeof initiator is created. It requires use of a new task "nvmeof_gateway_cfg" under cephadm which shares config information between two remote hosts. [1] https://github.com/ceph/ceph-nvmeof/ Signed-off-by: Vallari Agrawal --- qa/suites/rbd/nvmeof/% | 0 qa/suites/rbd/nvmeof/.qa | 1 + qa/suites/rbd/nvmeof/base/.qa | 1 + qa/suites/rbd/nvmeof/base/install.yaml | 32 ++++++++ qa/suites/rbd/nvmeof/centos_latest.yaml | 1 + qa/suites/rbd/nvmeof/cluster/+ | 0 qa/suites/rbd/nvmeof/cluster/.qa | 1 + qa/suites/rbd/nvmeof/cluster/fixed-3.yaml | 13 +++ qa/suites/rbd/nvmeof/cluster/openstack.yaml | 8 ++ qa/suites/rbd/nvmeof/workloads/.qa | 1 + .../nvmeof/workloads/nvmeof_initiator.yaml | 6 ++ qa/tasks/cephadm.py | 38 +++++++++ qa/workunits/rbd/nvmeof_initiator.sh | 79 +++++++++++++++++++ 13 files changed, 181 insertions(+) create mode 100644 qa/suites/rbd/nvmeof/% create mode 120000 qa/suites/rbd/nvmeof/.qa create mode 120000 qa/suites/rbd/nvmeof/base/.qa create mode 100644 qa/suites/rbd/nvmeof/base/install.yaml create mode 120000 qa/suites/rbd/nvmeof/centos_latest.yaml create mode 100644 qa/suites/rbd/nvmeof/cluster/+ create mode 120000 qa/suites/rbd/nvmeof/cluster/.qa create mode 100644 qa/suites/rbd/nvmeof/cluster/fixed-3.yaml create mode 100644 qa/suites/rbd/nvmeof/cluster/openstack.yaml create mode 120000 qa/suites/rbd/nvmeof/workloads/.qa create mode 100644 qa/suites/rbd/nvmeof/workloads/nvmeof_initiator.yaml create mode 100755 qa/workunits/rbd/nvmeof_initiator.sh diff --git a/qa/suites/rbd/nvmeof/% b/qa/suites/rbd/nvmeof/% new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/suites/rbd/nvmeof/.qa b/qa/suites/rbd/nvmeof/.qa new file mode 120000 index 0000000000000..a602a0353e751 --- /dev/null +++ b/qa/suites/rbd/nvmeof/.qa @@ -0,0 +1 @@ +../.qa/ \ 
No newline at end of file diff --git a/qa/suites/rbd/nvmeof/base/.qa b/qa/suites/rbd/nvmeof/base/.qa new file mode 120000 index 0000000000000..a602a0353e751 --- /dev/null +++ b/qa/suites/rbd/nvmeof/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/nvmeof/base/install.yaml b/qa/suites/rbd/nvmeof/base/install.yaml new file mode 100644 index 0000000000000..5a852f14dbe1b --- /dev/null +++ b/qa/suites/rbd/nvmeof/base/install.yaml @@ -0,0 +1,32 @@ +use_shaman: True +tasks: +- install: +- cephadm: +- cephadm.shell: + host.a: + # get state before nvmeof deployment + - ceph orch status + - ceph orch ps + - ceph orch host ls + - ceph orch device ls + - ceph osd lspools + # create pool + - ceph osd pool create mypool + - rbd pool init mypool + # deploy nvmeof + ## Uncomment to test specific nvmeof images + ## - ceph config set mgr mgr/cephadm/container_image_nvmeof quay.io/ceph/nvmeof:latest + - ceph orch apply nvmeof mypool --placement="1 $(hostname)" + - ceph orch ps --refresh + +- cephadm.wait_for_service: + service: nvmeof.mypool + +- cephadm.nvmeof_gateway_cfg: + source: host.a + target: client.1 + service: nvmeof.mypool + +- exec: + client.0: + - journalctl -u $(systemctl list-units | grep nvmeof.mypool | awk '{print $1}') diff --git a/qa/suites/rbd/nvmeof/centos_latest.yaml b/qa/suites/rbd/nvmeof/centos_latest.yaml new file mode 120000 index 0000000000000..bd9854e702982 --- /dev/null +++ b/qa/suites/rbd/nvmeof/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/rbd/nvmeof/cluster/+ b/qa/suites/rbd/nvmeof/cluster/+ new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/suites/rbd/nvmeof/cluster/.qa b/qa/suites/rbd/nvmeof/cluster/.qa new file mode 120000 index 0000000000000..a602a0353e751 --- /dev/null +++ b/qa/suites/rbd/nvmeof/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/nvmeof/cluster/fixed-3.yaml 
@contextlib.contextmanager
def nvmeof_gateway_cfg(ctx, config):
    """Share nvmeof gateway connection details with an initiator host.

    Finds the systemd unit of the given nvmeof service on the ``source``
    host and writes the gateway's IP address and unit-derived name to
    ``/etc/ceph/nvmeof.env`` on the ``target`` host, where the initiator
    workunit (rbd/nvmeof_initiator.sh) sources them.

    config must contain:
      source:  role whose host runs the nvmeof gateway (e.g. host.a)
      target:  role whose host acts as the initiator (e.g. client.1)
      service: nvmeof service name (e.g. nvmeof.mypool)

    :raises ConfigError: if any of the three keys is missing.
    """
    source_host = config.get('source')
    target_host = config.get('target')
    nvmeof_service = config.get('service')
    if not (source_host and target_host and nvmeof_service):
        raise ConfigError('nvmeof_gateway_cfg requires "source", "target", and "service"')
    remote = list(ctx.cluster.only(source_host).remotes.keys())[0]
    ip_address = remote.ip_address
    gateway_name = ""
    r = remote.run(args=[
        'systemctl', 'list-units',
        run.Raw('|'), 'grep', nvmeof_service
    ], stdout=StringIO())
    output = r.stdout.getvalue()
    # Unit name looks like "<service>.<host>.<random>.service"; match from
    # the service name up to (not including) the trailing ".service".
    # Raw f-string: "\." in a plain f-string is an invalid escape sequence
    # (SyntaxWarning on Python 3.12+).
    pattern = re.compile(rf"{re.escape(nvmeof_service)}(.*?)(?=\.service)")
    match = pattern.search(output)
    if match:
        gateway_name = match.group()
    # NOTE(review): when no unit matches, an empty NVMEOF_GATEWAY_NAME is
    # written and the initiator script fails later in a less obvious way --
    # consider raising here instead.
    conf_data = dedent(f"""
        NVMEOF_GATEWAY_IP_ADDRESS={ip_address}
        NVMEOF_GATEWAY_NAME={gateway_name}
        """)
    target_remote = list(ctx.cluster.only(target_host).remotes.keys())[0]
    target_remote.write_file(
        path='/etc/ceph/nvmeof.env',
        data=conf_data,
        sudo=True
    )
    yield
#!/bin/bash

set -ex

# Kernel modules and CLI tooling needed to act as an NVMe-oF initiator.
sudo modprobe nvme-fabrics
sudo modprobe nvme-tcp
sudo dnf install nvme-cli -y

# import NVMEOF_GATEWAY_IP_ADDRESS and NVMEOF_GATEWAY_NAME=nvmeof.poolname.smithiXXX.abcde
source /etc/ceph/nvmeof.env

HOSTNAME=$(hostname)
IMAGE="quay.io/ceph/nvmeof-cli:latest"
# Pool name is the second dot-separated field of the gateway name.
RBD_POOL=$(awk -F'.' '{print $2}' <<< "$NVMEOF_GATEWAY_NAME")
RBD_IMAGE="myimage"
RBD_SIZE=$((1024*8)) #8GiB
BDEV="mybdev"
SERIAL="SPDK00000000000001"
NQN="nqn.2016-06.io.spdk:cnode1"
PORT="4420"
SRPORT="5500"
DISCOVERY_PORT="8009"

# Run a single nvmeof-cli command against the gateway's control server.
gateway_cli() {
    sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT "$@"
}

# Provision the backing RBD image, then configure the gateway:
# bdev -> subsystem -> namespace -> listener -> open host access.
rbd create $RBD_POOL/$RBD_IMAGE --size $RBD_SIZE
gateway_cli create_bdev --pool $RBD_POOL --image $RBD_IMAGE --bdev $BDEV
sudo podman images
sudo podman ps
gateway_cli create_subsystem --subnqn $NQN --serial $SERIAL
gateway_cli add_namespace --subnqn $NQN --bdev $BDEV
gateway_cli create_listener -n $NQN -g client.$NVMEOF_GATEWAY_NAME -a $NVMEOF_GATEWAY_IP_ADDRESS -s $PORT
gateway_cli add_host --subnqn $NQN --host "*"
gateway_cli get_subsystems

# Connect this host as the initiator.
sudo lsmod | grep nvme
sudo nvme list
sudo nvme discover -t tcp -a $NVMEOF_GATEWAY_IP_ADDRESS -s $DISCOVERY_PORT
sudo nvme connect -t tcp --traddr $NVMEOF_GATEWAY_IP_ADDRESS -s $PORT -n $NQN
sudo nvme list

echo "testing nvmeof initiator..."

nvme_model="SPDK bdev Controller"

echo "Test 1: create initiator - starting"
if ! sudo nvme list | grep -q "$nvme_model"; then
    echo "nvmeof initiator not created!"
    exit 1
fi
echo "Test 1: create initiator - passed!"


echo "Test 2: device size - starting"
# RBD_SIZE is in MiB (rbd default unit); nvme reports bytes.
image_size_in_bytes=$(($RBD_SIZE * 1024 * 1024))
nvme_size=$(sudo nvme list --output-format=json | \
    jq -r ".Devices | .[] | select(.ModelNumber == \"$nvme_model\") | .PhysicalSize")
if [ "$image_size_in_bytes" != "$nvme_size" ]; then
    echo "block device size do not match!"
    exit 1
fi
echo "Test 2: device size - passed!"


echo "Test 3: basic IO - starting"
nvme_drive=$(sudo nvme list --output-format=json | \
    jq -r ".Devices | .[] | select(.ModelNumber == \"$nvme_model\") | .DevicePath")
io_input_file="/tmp/nvmeof_test_input"
echo "Hello world" > $io_input_file
truncate -s 2k $io_input_file
sudo dd if=$io_input_file of=$nvme_drive oflag=direct count=1 bs=2k #write
io_output_file="/tmp/nvmeof_test_output"
sudo dd if=$nvme_drive of=$io_output_file iflag=direct count=1 bs=2k #read
if ! cmp $io_input_file $io_output_file; then
    echo "nvmeof initiator - io test failed!"
    exit 1
fi
sudo rm -f $io_input_file $io_output_file
echo "Test 3: basic IO - passed!"


echo "nvmeof initiator tests passed!"