]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa: add rbd/nvmeof test 54205/head
authorVallari Agrawal <val.agl002@gmail.com>
Thu, 26 Oct 2023 07:55:44 +0000 (13:25 +0530)
committerVallari Agrawal <val.agl002@gmail.com>
Mon, 4 Dec 2023 13:57:54 +0000 (19:27 +0530)
A basic test for ceph-nvmeof[1] in which an
nvmeof initiator is created.
It requires the use of a new task, "nvmeof_gateway_cfg",
under cephadm, which shares config information
between two remote hosts.

[1] https://github.com/ceph/ceph-nvmeof/

Signed-off-by: Vallari Agrawal <val.agl002@gmail.com>
13 files changed:
qa/suites/rbd/nvmeof/% [new file with mode: 0644]
qa/suites/rbd/nvmeof/.qa [new symlink]
qa/suites/rbd/nvmeof/base/.qa [new symlink]
qa/suites/rbd/nvmeof/base/install.yaml [new file with mode: 0644]
qa/suites/rbd/nvmeof/centos_latest.yaml [new symlink]
qa/suites/rbd/nvmeof/cluster/+ [new file with mode: 0644]
qa/suites/rbd/nvmeof/cluster/.qa [new symlink]
qa/suites/rbd/nvmeof/cluster/fixed-3.yaml [new file with mode: 0644]
qa/suites/rbd/nvmeof/cluster/openstack.yaml [new file with mode: 0644]
qa/suites/rbd/nvmeof/workloads/.qa [new symlink]
qa/suites/rbd/nvmeof/workloads/nvmeof_initiator.yaml [new file with mode: 0644]
qa/tasks/cephadm.py
qa/workunits/rbd/nvmeof_initiator.sh [new file with mode: 0755]

diff --git a/qa/suites/rbd/nvmeof/% b/qa/suites/rbd/nvmeof/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/rbd/nvmeof/.qa b/qa/suites/rbd/nvmeof/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/nvmeof/base/.qa b/qa/suites/rbd/nvmeof/base/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/nvmeof/base/install.yaml b/qa/suites/rbd/nvmeof/base/install.yaml
new file mode 100644 (file)
index 0000000..5a852f1
--- /dev/null
@@ -0,0 +1,32 @@
+use_shaman: True
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+    host.a:
+    # get state before nvmeof deployment
+    - ceph orch status
+    - ceph orch ps
+    - ceph orch host ls
+    - ceph orch device ls
+    - ceph osd lspools
+    # create pool
+    - ceph osd pool create mypool
+    - rbd pool init mypool
+    # deploy nvmeof
+    ## Uncomment to test specific nvmeof images
+    ## - ceph config set mgr mgr/cephadm/container_image_nvmeof quay.io/ceph/nvmeof:latest
+    - ceph orch apply nvmeof mypool --placement="1 $(hostname)"
+    - ceph orch ps --refresh
+
+- cephadm.wait_for_service:
+    service: nvmeof.mypool
+
+- cephadm.nvmeof_gateway_cfg:
+    source: host.a 
+    target: client.1
+    service: nvmeof.mypool
+
+- exec:
+    client.0:
+      - journalctl -u $(systemctl list-units | grep nvmeof.mypool | awk '{print $1}')
diff --git a/qa/suites/rbd/nvmeof/centos_latest.yaml b/qa/suites/rbd/nvmeof/centos_latest.yaml
new file mode 120000 (symlink)
index 0000000..bd9854e
--- /dev/null
@@ -0,0 +1 @@
+.qa/distros/supported/centos_latest.yaml
\ No newline at end of file
diff --git a/qa/suites/rbd/nvmeof/cluster/+ b/qa/suites/rbd/nvmeof/cluster/+
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/rbd/nvmeof/cluster/.qa b/qa/suites/rbd/nvmeof/cluster/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/nvmeof/cluster/fixed-3.yaml b/qa/suites/rbd/nvmeof/cluster/fixed-3.yaml
new file mode 100644 (file)
index 0000000..42e696c
--- /dev/null
@@ -0,0 +1,13 @@
+roles:
+- - host.a
+  - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.0
+- - host.b
+  - mon.b
+  - osd.2
+  - osd.3
+  - osd.4
+  - client.1
diff --git a/qa/suites/rbd/nvmeof/cluster/openstack.yaml b/qa/suites/rbd/nvmeof/cluster/openstack.yaml
new file mode 100644 (file)
index 0000000..40fef47
--- /dev/null
@@ -0,0 +1,8 @@
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 8000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 4
+      size: 30 # GB
diff --git a/qa/suites/rbd/nvmeof/workloads/.qa b/qa/suites/rbd/nvmeof/workloads/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/nvmeof/workloads/nvmeof_initiator.yaml b/qa/suites/rbd/nvmeof/workloads/nvmeof_initiator.yaml
new file mode 100644 (file)
index 0000000..4c947c1
--- /dev/null
@@ -0,0 +1,6 @@
+tasks:
+- workunit:
+    no_coverage_and_limits: true
+    clients:
+      client.1:
+        - rbd/nvmeof_initiator.sh
index 0431aac8533eb8bc89ccb6595458f6f3ad8ffaaf..1fdf6ee012e3217085ff0138fb10ded38f4b34bf 100644 (file)
@@ -21,6 +21,7 @@ from teuthology import packaging
 from teuthology.orchestra import run
 from teuthology.orchestra.daemon import DaemonGroup
 from teuthology.config import config as teuth_config
+from teuthology.exceptions import ConfigError
 from textwrap import dedent
 from tasks.cephfs.filesystem import MDSCluster, Filesystem
 from tasks.util import chacra
@@ -100,6 +101,43 @@ def update_archive_setting(ctx, key, value):
         yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
 
 
@contextlib.contextmanager
def nvmeof_gateway_cfg(ctx, config):
    """Share nvmeof gateway connection info between two remotes.

    Looks up the systemd unit of the given nvmeof service on the
    "source" host and writes the gateway's IP address and unit name
    to /etc/ceph/nvmeof.env on the "target" host, where workunits
    (e.g. rbd/nvmeof_initiator.sh) source it.

    Config keys (all required):
      source:  role whose remote runs the nvmeof gateway (e.g. host.a)
      target:  role whose remote consumes the gateway info (e.g. client.1)
      service: nvmeof service name (e.g. nvmeof.mypool)

    :raises ConfigError: if any required config key is missing.
    """
    source_host = config.get('source')
    target_host = config.get('target')
    nvmeof_service = config.get('service')
    if not (source_host and target_host and nvmeof_service):
        raise ConfigError('nvmeof_gateway_cfg requires "source", "target", and "service"')
    remote = list(ctx.cluster.only(source_host).remotes.keys())[0]
    ip_address = remote.ip_address
    gateway_name = ""
    r = remote.run(args=[
        'systemctl', 'list-units',
        run.Raw('|'), 'grep', nvmeof_service
    ], stdout=StringIO())
    output = r.stdout.getvalue()
    # Unit names look like "nvmeof.mypool.<host>.<id>.service"; capture
    # everything up to, but not including, the ".service" suffix.
    # NOTE: raw f-string so that "\." is a regex escape rather than an
    # invalid string escape (deprecated, future SyntaxError).
    pattern = re.compile(rf"{re.escape(nvmeof_service)}(.*?)(?=\.service)")
    match = pattern.search(output)
    if match:
        gateway_name = match.group()
    conf_data = dedent(f"""
        NVMEOF_GATEWAY_IP_ADDRESS={ip_address}
        NVMEOF_GATEWAY_NAME={gateway_name}
        """)
    target_remote = list(ctx.cluster.only(target_host).remotes.keys())[0]
    target_remote.write_file(
        path='/etc/ceph/nvmeof.env',
        data=conf_data,
        sudo=True
    )

    try:
        yield
    finally:
        # Nothing to tear down: the env file is left in place for the
        # duration of the job.
        pass
+
+
 @contextlib.contextmanager
 def normalize_hostnames(ctx):
     """
diff --git a/qa/workunits/rbd/nvmeof_initiator.sh b/qa/workunits/rbd/nvmeof_initiator.sh
new file mode 100755 (executable)
index 0000000..c3f3c10
--- /dev/null
@@ -0,0 +1,79 @@
#!/bin/bash

set -ex

# Load the NVMe-over-TCP kernel modules and install the userspace CLI.
sudo modprobe nvme-fabrics
sudo modprobe nvme-tcp
sudo dnf install nvme-cli -y

# import NVMEOF_GATEWAY_IP_ADDRESS and NVMEOF_GATEWAY_NAME=nvmeof.poolname.smithiXXX.abcde
source /etc/ceph/nvmeof.env

HOSTNAME=$(hostname)
IMAGE="quay.io/ceph/nvmeof-cli:latest"
# Pool name is the second dot-separated field of the gateway service name.
RBD_POOL=$(awk -F'.' '{print $2}' <<< "$NVMEOF_GATEWAY_NAME")
RBD_IMAGE="myimage"
RBD_SIZE=$((1024*8)) #8GiB
BDEV="mybdev"
SERIAL="SPDK00000000000001"
NQN="nqn.2016-06.io.spdk:cnode1"
PORT="4420"
SRPORT="5500"
DISCOVERY_PORT="8009"

# Run one nvmeof-cli command against the gateway's control plane.
gw_cli() {
    sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT "$@"
}

# Provision an RBD image and export it through the gateway.
rbd create $RBD_POOL/$RBD_IMAGE --size $RBD_SIZE
gw_cli create_bdev --pool $RBD_POOL --image $RBD_IMAGE --bdev $BDEV
sudo podman images
sudo podman ps
gw_cli create_subsystem --subnqn $NQN --serial $SERIAL
gw_cli add_namespace --subnqn $NQN --bdev $BDEV
gw_cli create_listener -n $NQN -g client.$NVMEOF_GATEWAY_NAME -a $NVMEOF_GATEWAY_IP_ADDRESS -s $PORT
gw_cli add_host --subnqn $NQN --host "*"
gw_cli get_subsystems

# Discover and connect from the initiator side.
sudo lsmod | grep nvme
sudo nvme list
sudo nvme discover -t tcp -a $NVMEOF_GATEWAY_IP_ADDRESS -s $DISCOVERY_PORT
sudo nvme connect -t tcp --traddr $NVMEOF_GATEWAY_IP_ADDRESS -s $PORT -n $NQN
sudo nvme list

echo "testing nvmeof initiator..."

nvme_model="SPDK bdev Controller"

echo "Test 1: create initiator - starting"
# The SPDK-backed controller must show up in the local NVMe device list.
if ! sudo nvme list | grep -q "$nvme_model"; then
  echo "nvmeof initiator not created!"
  exit 1
fi
echo "Test 1: create initiator - passed!"


echo "Test 2: device size - starting"
# RBD_SIZE is in MiB; the nvme tool reports bytes.
expected_bytes=$(($RBD_SIZE * 1024 * 1024))
reported_bytes=$(sudo nvme list --output-format=json | \
        jq -r ".Devices | .[] | select(.ModelNumber == \"$nvme_model\") | .PhysicalSize")
if [ "$expected_bytes" != "$reported_bytes" ]; then
  echo "block device size do not match!"
  exit 1
fi
echo "Test 2: device size - passed!"


echo "Test 3: basic IO - starting"
nvme_dev=$(sudo nvme list --output-format=json | \
        jq -r ".Devices | .[] | select(.ModelNumber == \"$nvme_model\") | .DevicePath")
# Write a known 2k block straight to the device, read it back, compare.
wr_file="/tmp/nvmeof_test_input"
echo "Hello world" > $wr_file
truncate -s 2k $wr_file
sudo dd if=$wr_file of=$nvme_dev oflag=direct count=1 bs=2k #write
rd_file="/tmp/nvmeof_test_output"
sudo dd if=$nvme_dev of=$rd_file iflag=direct count=1 bs=2k #read
if ! cmp $wr_file $rd_file; then
  echo "nvmeof initiator - io test failed!"
  exit 1
fi
sudo rm -f $wr_file $rd_file
echo "Test 3: basic IO - passed!"


echo "nvmeof initiator tests passed!"