--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+use_shaman: True
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+    host.a:
+      # get state before nvmeof deployment
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch host ls
+      - ceph orch device ls
+      - ceph osd lspools
+      # create pool
+      - ceph osd pool create mypool
+      - rbd pool init mypool
+      # deploy nvmeof
+      ## Uncomment to test specific nvmeof images
+      ## - ceph config set mgr mgr/cephadm/container_image_nvmeof quay.io/ceph/nvmeof:latest
+      - ceph orch apply nvmeof mypool --placement="1 $(hostname)"
+      - ceph orch ps --refresh
+
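+# block until cephadm reports the nvmeof.mypool service as running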
+- cephadm.wait_for_service:
+    service: nvmeof.mypool
+
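+# copy the gateway's IP address and daemon name from host.a into
+# /etc/ceph/nvmeof.env on client.1, for use by the initiator workunit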
+- cephadm.nvmeof_gateway_cfg:
+    source: host.a
+    target: client.1
+    service: nvmeof.mypool
+
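+# dump the gateway unit's journal for debugging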
+- exec:
+    client.0:
+      - journalctl -u $(systemctl list-units | grep nvmeof.mypool | awk '{print $1}')
--- /dev/null
+.qa/distros/supported/centos_latest.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
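+# host.a carries the cluster daemons and the nvmeof gateway;
+# client.1 on host.b acts as the nvmeof initiator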
+roles:
+- - host.a
+  - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.0
+- - host.b
+  - mon.b
+  - osd.2
+  - osd.3
+  - osd.4
+  - client.1
--- /dev/null
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 8000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 4
+      size: 30 # GB
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
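+# run the initiator script on the client that received /etc/ceph/nvmeof.env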
+tasks:
+- workunit:
+    no_coverage_and_limits: true
+    clients:
+      client.1:
+        - rbd/nvmeof_initiator.sh
from teuthology.orchestra import run
from teuthology.orchestra.daemon import DaemonGroup
from teuthology.config import config as teuth_config
+from teuthology.exceptions import ConfigError
from textwrap import dedent
from tasks.cephfs.filesystem import MDSCluster, Filesystem
from tasks.util import chacra
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
+@contextlib.contextmanager
+def nvmeof_gateway_cfg(ctx, config):
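+    """
+    Publish the nvmeof gateway's connection details to a client host.
+
+    Looks up the systemd unit of the given nvmeof service on the source
+    host and writes NVMEOF_GATEWAY_IP_ADDRESS and NVMEOF_GATEWAY_NAME
+    to /etc/ceph/nvmeof.env on the target host.
+
+    - source: role of the host running the gateway (e.g. host.a)
+    - target: role of the client receiving the env file (e.g. client.1)
+    - service: name of the nvmeof service (e.g. nvmeof.mypool)
+    """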
+    source_host = config.get('source')
+    target_host = config.get('target')
+    nvmeof_service = config.get('service')
+    if not (source_host and target_host and nvmeof_service):
+        raise ConfigError('nvmeof_gateway_cfg requires "source", "target", and "service"')
+    remote = list(ctx.cluster.only(source_host).remotes.keys())[0]
+    ip_address = remote.ip_address
+    gateway_name = ""
+    r = remote.run(args=[
+        'systemctl', 'list-units',
+        run.Raw('|'), 'grep', nvmeof_service
+    ], stdout=StringIO())
+    output = r.stdout.getvalue()
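+    # cephadm runs the daemon under a unit named
+    # "ceph-<fsid>@nvmeof.<pool>.<host>.<id>.service"; extract the daemon
+    # name between the service prefix and the ".service" suffix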
+    pattern_str = rf"{re.escape(nvmeof_service)}(.*?)(?=\.service)"
+    pattern = re.compile(pattern_str)
+    match = pattern.search(output)
+    if match:
+        gateway_name = match.group()
+    else:
+        raise ConfigError(f'could not find a systemd unit for {nvmeof_service}')
+    conf_data = dedent(f"""
+        NVMEOF_GATEWAY_IP_ADDRESS={ip_address}
+        NVMEOF_GATEWAY_NAME={gateway_name}
+        """)
+    target_remote = list(ctx.cluster.only(target_host).remotes.keys())[0]
+    target_remote.write_file(
+        path='/etc/ceph/nvmeof.env',
+        data=conf_data,
+        sudo=True
+    )
+
+    try:
+        yield
+    finally:
+        pass
+
+
@contextlib.contextmanager
def normalize_hostnames(ctx):
"""
--- /dev/null
+#!/bin/bash
+
+set -ex
+
+sudo modprobe nvme-fabrics
+sudo modprobe nvme-tcp
+sudo dnf install nvme-cli -y
+
+# pull in NVMEOF_GATEWAY_IP_ADDRESS and NVMEOF_GATEWAY_NAME (e.g. nvmeof.poolname.smithiXXX.abcde)
+source /etc/ceph/nvmeof.env
+
+HOSTNAME=$(hostname)
+IMAGE="quay.io/ceph/nvmeof-cli:latest"
+RBD_POOL=$(awk -F'.' '{print $2}' <<< "$NVMEOF_GATEWAY_NAME") # pool is the 2nd field of nvmeof.<pool>.<host>.<id>
+RBD_IMAGE="myimage"
+RBD_SIZE=$((1024*8)) # 8 GiB (rbd create --size takes MiB)
+BDEV="mybdev"
+SERIAL="SPDK00000000000001"
+NQN="nqn.2016-06.io.spdk:cnode1"
+PORT="4420"
+SRPORT="5500"
+DISCOVERY_PORT="8009"
+
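+# create the backing RBD image, then configure the gateway through the
+# nvmeof-cli container over its control-plane port ($SRPORT)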
+rbd create $RBD_POOL/$RBD_IMAGE --size $RBD_SIZE
+sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT create_bdev --pool $RBD_POOL --image $RBD_IMAGE --bdev $BDEV
+sudo podman images
+sudo podman ps
+sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT create_subsystem --subnqn $NQN --serial $SERIAL
+sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT add_namespace --subnqn $NQN --bdev $BDEV
+sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT create_listener -n $NQN -g client.$NVMEOF_GATEWAY_NAME -a $NVMEOF_GATEWAY_IP_ADDRESS -s $PORT
+sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT add_host --subnqn $NQN --host "*"
+sudo podman run -it $IMAGE --server-address $NVMEOF_GATEWAY_IP_ADDRESS --server-port $SRPORT get_subsystems
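+# discover and connect from the kernel initiator, then verify the
+# namespace shows up as a local NVMe device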
+sudo lsmod | grep nvme
+sudo nvme list
+sudo nvme discover -t tcp -a $NVMEOF_GATEWAY_IP_ADDRESS -s $DISCOVERY_PORT
+sudo nvme connect -t tcp --traddr $NVMEOF_GATEWAY_IP_ADDRESS -s $PORT -n $NQN
+sudo nvme list
+
+echo "testing nvmeof initiator..."
+
+nvme_model="SPDK bdev Controller"
+
+echo "Test 1: create initiator - starting"
+if ! sudo nvme list | grep -q "$nvme_model"; then
+    echo "nvmeof initiator not created!"
+    exit 1
+fi
+echo "Test 1: create initiator - passed!"
+
+
+echo "Test 2: device size - starting"
+image_size_in_bytes=$(($RBD_SIZE * 1024 * 1024)) # RBD_SIZE is in MiB
+nvme_size=$(sudo nvme list --output-format=json | \
+    jq -r ".Devices | .[] | select(.ModelNumber == \"$nvme_model\") | .PhysicalSize")
+if [ "$image_size_in_bytes" != "$nvme_size" ]; then
+    echo "block device sizes do not match!"
+    exit 1
+fi
+echo "Test 2: device size - passed!"
+
+
+echo "Test 3: basic IO - starting"
+nvme_drive=$(sudo nvme list --output-format=json | \
+    jq -r ".Devices | .[] | select(.ModelNumber == \"$nvme_model\") | .DevicePath")
+io_input_file="/tmp/nvmeof_test_input"
+echo "Hello world" > $io_input_file
+truncate -s 2k $io_input_file
+sudo dd if=$io_input_file of=$nvme_drive oflag=direct count=1 bs=2k # write
+io_output_file="/tmp/nvmeof_test_output"
+sudo dd if=$nvme_drive of=$io_output_file iflag=direct count=1 bs=2k # read back
+if ! cmp $io_input_file $io_output_file; then
+    echo "nvmeof initiator - io test failed!"
+    exit 1
+fi
+sudo rm -f $io_input_file $io_output_file
+echo "Test 3: basic IO - passed!"
+
+
+echo "nvmeof initiator tests passed!"