xref: /spdk/test/nvme/nvme.sh (revision ba23cec1820104cc710ad776f0127e1cf82033aa)
#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/scripts/common.sh
source $rootdir/test/common/autotest_common.sh

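# Run the identify example against all locally attached NVMe controllers, then once
# more per controller, addressed explicitly by its PCIe transport address.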
function nvme_identify() {
	$rootdir/examples/nvme/identify/identify -i 0
	for bdf in $(get_nvme_bdfs); do
		$rootdir/examples/nvme/identify/identify -r "trtype:PCIe traddr:${bdf}" -i 0
	done
}

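# Run short perf example workloads (queue depth 128, 12K I/Os, 1 second each):
# a read pass, a write pass, and, when a RAM disk exists, a read pass through
# the AIO path against /dev/ram0.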
function nvme_perf() {
	# enable the 'no shutdown notification' option (-N) on the first run
	$rootdir/examples/nvme/perf/perf -q 128 -w read -o 12288 -t 1 -LL -i 0 -N
	$rootdir/examples/nvme/perf/perf -q 128 -w write -o 12288 -t 1 -LL -i 0
	if [ -b /dev/ram0 ]; then
		# Test perf with an AIO device
		$rootdir/examples/nvme/perf/perf /dev/ram0 -q 128 -w read -o 12288 -t 1 -LL -i 0
	fi
}

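# Run fio through the SPDK fio plugin against each namespace. The plugin takes the
# target as the fio "filename": colons in the PCI address are replaced with dots
# because fio treats ':' as a filename separator, and "ns=" selects the namespace id
# (derived here from the trailing digits of the kernel block device name, e.g. nvme0n1 -> 1).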
function nvme_fio_test() {
	PLUGIN_DIR=$rootdir/examples/nvme/fio_plugin
	for bdf in $(get_nvme_bdfs); do
		for blkname in $(get_nvme_name_from_bdf $bdf); do
			fio_nvme $PLUGIN_DIR/example_config.fio --filename="trtype=PCIe traddr=${bdf//:/.} ns=${blkname##*n}"
		done
	done
}

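# Run three perf instances concurrently, each pinned to its own core (-c) and all
# attached to shared memory group 0 (-i 0), so they run as secondary processes
# alongside the stub primary started later in this script. Two run in the
# background; the third runs in the foreground, then we wait for the background pair.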
function nvme_multi_secondary() {
	$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x1 &
	pid0=$!
	$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x2 &
	pid1=$!
	$rootdir/examples/nvme/perf/perf -i 0 -q 16 -w read -o 4096 -t 3 -c 0x4
	wait $pid0
	wait $pid1
}

if [ $(uname) = Linux ]; then
	# check that our setup.sh script does not bind NVMe devices to uio/vfio if they
	# have an active mountpoint
	$rootdir/scripts/setup.sh reset
	# give the kernel nvme driver some time to create the block devices before we start looking for them
	sleep 1
	blkname=''
	# first, find an NVMe device that does not already have an active mountpoint;
	# this covers the rare case where someone is running this test script on a system
	# that has a mounted NVMe filesystem
	#
	# note: more work probably needs to be done to properly handle devices with multiple
	# namespaces
	for bdf in $(get_nvme_bdfs); do
		for name in $(get_nvme_name_from_bdf $bdf); do
			if [ "$name" != "" ]; then
				mountpoints=$(lsblk /dev/$name --output MOUNTPOINT -n | wc -w)
				if [ "$mountpoints" = "0" ]; then
					blkname=$name
					break 2
				fi
			fi
		done
	done
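	# note: the "break 2" above leaves $bdf and $blkname pointing at the selected
	# controller/namespace; the driver checks further down rely on that $bdf value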

	# if we found an NVMe block device without an active mountpoint, create and mount
	# a filesystem on it for the purpose of testing the setup.sh script
	if [ "$blkname" != "" ]; then
		parted -s /dev/$blkname mklabel gpt
		# just create a small (~100MB) partition - this tests our ability to detect mountpoints
		# on partitions of the device, not just the device itself; it is also faster
		# since we don't trim and initialize the whole namespace
		parted -s /dev/$blkname mkpart primary 1 100
		sleep 1
		mkfs.ext4 -F /dev/${blkname}p1
		mkdir -p /tmp/nvmetest
		mount /dev/${blkname}p1 /tmp/nvmetest
		sleep 1
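		# run setup.sh normally: it should refuse to unbind this controller from the
		# kernel nvme driver because one of its partitions is mounted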
		$rootdir/scripts/setup.sh
		driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
		# check that the nvme driver is still loaded against the device
		if [ "$driver" != "nvme" ]; then
			exit 1
		fi
		umount /tmp/nvmetest
		rmdir /tmp/nvmetest
		# write zeroes to the device to blow away the partition table and filesystem
		dd if=/dev/zero of=/dev/$blkname oflag=direct bs=1M count=1
		$rootdir/scripts/setup.sh
		driver=$(basename $(readlink /sys/bus/pci/devices/$bdf/driver))
		# check that the nvme driver is not loaded against the device
		if [ "$driver" = "nvme" ]; then
			exit 1
		fi
	else
		$rootdir/scripts/setup.sh
	fi
fi

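# On Linux, start the SPDK stub application as the primary process so that the
# example/test apps below (launched with -i 0) can attach to its memory as
# secondary processes; the trap tears the stub down if the test run aborts.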
if [ $(uname) = Linux ]; then
	trap "kill_stub -9; exit 1" SIGINT SIGTERM EXIT
	start_stub "-s 4096 -i 0 -m 0xE"
fi

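# run_test (from autotest_common.sh) wraps each command with per-test timing and
# reporting under the given test name.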
run_test "nvme_reset" $testdir/reset/reset -q 64 -w write -s 4096 -t 5
run_test "nvme_identify" nvme_identify
run_test "nvme_perf" nvme_perf
run_test "nvme_hello_world" $rootdir/examples/nvme/hello_world/hello_world
run_test "nvme_deallocated_value" $testdir/deallocated_value/deallocated_value
run_test "nvme_sgl" $testdir/sgl/sgl
run_test "nvme_e2edp" $testdir/e2edp/nvme_dp
run_test "nvme_reserve" $testdir/reserve/reserve
run_test "nvme_err_injection" $testdir/err_injection/err_injection
run_test "nvme_overhead" $testdir/overhead/overhead -s 4096 -t 1 -H
run_test "nvme_arbitration" $rootdir/examples/nvme/arbitration/arbitration -t 3 -i 0
if [ $SPDK_TEST_NVME_CUSE -eq 1 ]; then
	run_test "nvme_cuse" $testdir/cuse/cuse
fi

if [[ $CONFIG_FIO_PLUGIN == y ]]; then
	run_test "nvme_fio" nvme_fio_test
fi

if [ $(uname) != "FreeBSD" ]; then
	run_test "nvme_startup" $testdir/startup/startup -t 1000000
	run_test "nvme_multi_secondary" nvme_multi_secondary
	trap - SIGINT SIGTERM EXIT
	kill_stub
fi