#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../..)
source $rootdir/test/common/autotest_common.sh
source $testdir/nbd_common.sh

rpc_py="$rootdir/scripts/rpc.py"
conf_file="$testdir/bdev.json"
# Make sure the configuration is clean
: > "$conf_file"

function cleanup() {
	rm -f "$SPDK_TEST_STORAGE/aiofile"
	rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
	rm -f "$conf_file"

	if [[ $test_type == rbd ]]; then
		rbd_cleanup
	fi
}

function start_spdk_tgt() {
	"$SPDK_BIN_DIR/spdk_tgt" &
	spdk_tgt_pid=$!
	trap 'killprocess "$spdk_tgt_pid"; exit 1' SIGINT SIGTERM EXIT
	waitforlisten "$spdk_tgt_pid"
}

function setup_bdev_conf() {
	"$rpc_py" <<- RPC
		bdev_split_create Malloc1 2
		bdev_split_create -s 4 Malloc2 8
		bdev_malloc_create -b Malloc0 32 512
		bdev_malloc_create -b Malloc1 32 512
		bdev_malloc_create -b Malloc2 32 512
		bdev_malloc_create -b Malloc3 32 512
		bdev_malloc_create -b Malloc4 32 512
		bdev_malloc_create -b Malloc5 32 512
		bdev_passthru_create -p TestPT -b Malloc3
		bdev_raid_create -n raid0 -z 64 -r 0 -b "Malloc4 Malloc5"
	RPC
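	# Note: the split creates above reference Malloc1 and Malloc2 before the malloc bdevs
	# exist; the split vbdev module is expected to claim the base bdevs once they are
	# registered, so the ordering within the RPC batch should not matter.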
	# FIXME: QoS doesn't work properly with json_config, see issue 1146
	#$rpc_py bdev_set_qos_limit --rw_mbytes_per_sec 100 Malloc3
	#$rpc_py bdev_set_qos_limit --rw_ios_per_sec 20000 Malloc0
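	# The AIO bdev below is backed by a plain file (~10 MB, 2048-byte blocks) and is
	# skipped on FreeBSD.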
	if [[ $(uname -s) != "FreeBSD" ]]; then
		dd if=/dev/zero of="$SPDK_TEST_STORAGE/aiofile" bs=2048 count=5000
		"$rpc_py" bdev_aio_create "$SPDK_TEST_STORAGE/aiofile" AIO0 2048
	fi
}

function setup_nvme_conf() {
	"$rootdir/scripts/gen_nvme.sh" --json | "$rpc_py" load_subsystem_config
}

function setup_gpt_conf() {
	if [[ $(uname -s) = Linux ]] && hash sgdisk; then
		$rootdir/scripts/setup.sh reset
		# FIXME: Note that we are racing with the kernel here. There's no guarantee that
		# the proper objects will already be in place under sysfs, nor that any udev-like
		# helper has created the block devices for us. Replace the sleep below with a
		# proper udev settle routine.
		sleep 1s
		# Get nvme devices by following drivers' links towards nvme class
		local nvme_devs=(/sys/bus/pci/drivers/nvme/*/nvme/nvme*/nvme*n*) nvme_dev
		gpt_nvme=""
		# Pick first device which doesn't have any valid partition table
		for nvme_dev in "${nvme_devs[@]}"; do
			dev=/dev/${nvme_dev##*/}
			if ! pt=$(parted "$dev" -ms print 2>&1); then
				[[ $pt == *"$dev: unrecognised disk label"* ]] || continue
				gpt_nvme=$dev
				break
			fi
		done
		if [[ -n $gpt_nvme ]]; then
			# Create gpt partition table
			parted -s "$gpt_nvme" mklabel gpt mkpart first '0%' '50%' mkpart second '50%' '100%'
			# change the GUID to SPDK GUID value
			# FIXME: Hardcode this in some common place, this value should not be changed much
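			# gpt.h defines the GUID as a parenthesized, comma-separated list of 0x-prefixed
			# fields; the expansions below join those fields with '-' and drop the '0x'
			# prefixes to produce the plain GUID string that sgdisk expects.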
			IFS="()" read -r _ SPDK_GPT_GUID _ < <(grep SPDK_GPT_PART_TYPE_GUID module/bdev/gpt/gpt.h)
			SPDK_GPT_GUID=${SPDK_GPT_GUID//, /-} SPDK_GPT_GUID=${SPDK_GPT_GUID//0x/}
			sgdisk -t "1:$SPDK_GPT_GUID" "$gpt_nvme"
			sgdisk -t "2:$SPDK_GPT_GUID" "$gpt_nvme"
			"$rootdir/scripts/setup.sh"
			"$rpc_py" bdev_get_bdevs
			setup_nvme_conf
		else
			printf 'Did not find any nvme block devices to work with, aborting the test\n' >&2
			"$rootdir/scripts/setup.sh"
			return 1
		fi
	else
		# Unsupported platform or missing tooling; nothing to be done, so simply exit the
		# test gracefully.
		trap - SIGINT SIGTERM EXIT
		killprocess "$spdk_tgt_pid"
		cleanup
		exit 0
	fi
}

function setup_crypto_aesni_conf() {
	# Malloc0 and Malloc1 use AESNI
	"$rpc_py" <<- RPC
		bdev_malloc_create -b Malloc0 16 512
		bdev_malloc_create -b Malloc1 16 512
		bdev_crypto_create Malloc0 crypto_ram crypto_aesni_mb 0123456789123456
		bdev_crypto_create Malloc1 crypto_ram2 crypto_aesni_mb 9012345678912345
	RPC
}

function setup_crypto_qat_conf() {
	# Malloc0 will use QAT AES_CBC
	# Malloc1 will use QAT AES_XTS
	"$rpc_py" <<- RPC
		bdev_malloc_create -b Malloc0 16 512
		bdev_malloc_create -b Malloc1 16 512
		bdev_crypto_create Malloc0 crypto_ram crypto_qat 0123456789123456
		bdev_crypto_create -c AES_XTS -k2 0123456789123456 Malloc1 crypto_ram3 crypto_qat 0123456789123456
	RPC
	"$rpc_py" bdev_get_bdevs -b Malloc1
}

function setup_pmem_conf() {
	if hash pmempool; then
		rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
		pmempool create blk --size=32M 512 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
		"$rpc_py" bdev_pmem_create -n Pmem0 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
	else
		return 1
	fi
}

function setup_rbd_conf() {
	timing_enter rbd_setup
	rbd_setup 127.0.0.1
	timing_exit rbd_setup

	"$rpc_py" bdev_rbd_create -b Ceph0 rbd foo 512
}

function bdev_bounds() {
	$testdir/bdevio/bdevio -w -s $PRE_RESERVED_MEM --json "$conf_file" &
	bdevio_pid=$!
	trap 'killprocess $bdevio_pid; exit 1' SIGINT SIGTERM EXIT
	echo "Process bdevio pid: $bdevio_pid"
	waitforlisten $bdevio_pid
	$testdir/bdevio/tests.py perform_tests
	killprocess $bdevio_pid
	trap - SIGINT SIGTERM EXIT
}

function nbd_function_test() {
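	# modprobe -n only checks that the nbd module is available without loading it;
	# the module is actually loaded further below, right before bdev_svc is started.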
	if [ $(uname -s) = Linux ] && modprobe -n nbd; then
		local rpc_server=/var/tmp/spdk-nbd.sock
		local conf=$1
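		# Use as many nbd devices as there are unclaimed bdevs, capped at the number of
		# /dev/nbd* nodes present on the system.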
		local nbd_all=($(ls /dev/nbd* | grep -v p))
		local bdev_all=($bdevs_name)
		local nbd_num=${#bdev_all[@]}
		if [ ${#nbd_all[@]} -le $nbd_num ]; then
			nbd_num=${#nbd_all[@]}
		fi
		local nbd_list=(${nbd_all[@]:0:$nbd_num})
		local bdev_list=(${bdev_all[@]:0:$nbd_num})

		if [ ! -e $conf ]; then
			return 1
		fi

		modprobe nbd
		$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 --json "$conf" &
		nbd_pid=$!
		trap 'killprocess $nbd_pid; exit 1' SIGINT SIGTERM EXIT
		echo "Process nbd pid: $nbd_pid"
		waitforlisten $nbd_pid $rpc_server

		nbd_rpc_start_stop_verify $rpc_server "${bdev_list[*]}"
		nbd_rpc_data_verify $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"

		killprocess $nbd_pid
		trap - SIGINT SIGTERM EXIT
	fi

	return 0
}

function fio_test_suite() {
	# Generate the fio config file given the list of all unclaimed bdevs
	fio_config_gen $testdir/bdev.fio verify AIO
	for b in $(echo $bdevs | jq -r '.name'); do
		echo "[job_$b]" >> $testdir/bdev.fio
		echo "filename=$b" >> $testdir/bdev.fio
	done

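	# fio's spdk_bdev ioengine reads the bdev configuration from --spdk_json_conf rather
	# than probing kernel block devices, so the generated job file references bdevs by name.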
	local fio_params="--ioengine=spdk_bdev --iodepth=8 --bs=4k --runtime=10 $testdir/bdev.fio --spdk_json_conf=$conf_file"

	run_test "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM \
		--output=$output_dir/blockdev_fio_verify.txt
	rm -f ./*.state
	rm -f $testdir/bdev.fio

	# Generate the fio config file given the list of all unclaimed bdevs that support unmap
	fio_config_gen $testdir/bdev.fio trim
	if [ "$(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name')" != "" ]; then
		for b in $(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name'); do
			echo "[job_$b]" >> $testdir/bdev.fio
			echo "filename=$b" >> $testdir/bdev.fio
		done
	else
		rm -f $testdir/bdev.fio
		return 0
	fi

	run_test "bdev_fio_trim" fio_bdev $fio_params --output=$output_dir/blockdev_trim.txt
	rm -f ./*.state
	rm -f $testdir/bdev.fio
}

function get_io_result() {
	local limit_type=$1
	local qos_dev=$2
	local iostat_result
	iostat_result=$($rootdir/scripts/iostat.py -d -i 1 -t $QOS_RUN_TIME | grep $qos_dev | tail -1)
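	# iostat.py prints one row per device; column 2 is taken as the I/O-per-second figure and
	# column 6 as the bandwidth in kB/s, which is why run_qos_test scales its MB/s limit by 1024.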
	if [ $limit_type = IOPS ]; then
		iostat_result=$(awk '{print $2}' <<< $iostat_result)
	elif [ $limit_type = BANDWIDTH ]; then
		iostat_result=$(awk '{print $6}' <<< $iostat_result)
	fi

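	# Strip the fractional part so callers can use the result in integer arithmetic.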
	echo ${iostat_result/.*/}
}

function run_qos_test() {
	local qos_limit=$1
	local qos_result=0

	qos_result=$(get_io_result $2 $3)
	if [ $2 = BANDWIDTH ]; then
		qos_limit=$((qos_limit * 1024))
	fi
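	# Accept results within +/-10% of the configured limit.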
	lower_limit=$((qos_limit * 9 / 10))
	upper_limit=$((qos_limit * 11 / 10))

	# QoS enforcement is based on the number of bytes transferred and currently shows some variation.
	if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then
		echo "Failed to limit the $2 of bdev $3 by QoS"
		$rpc_py bdev_malloc_delete $QOS_DEV_1
		$rpc_py bdev_null_delete $QOS_DEV_2
		killprocess $QOS_PID
		exit 1
	fi
}

function qos_function_test() {
	local qos_lower_iops_limit=1000
	local qos_lower_bw_limit=2
	local io_result=0
	local iops_limit=0
	local bw_limit=0

	io_result=$(get_io_result IOPS $QOS_DEV_1)
	# Set the IOPS limit as one quarter of the measured performance without QoS
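	# The limit is rounded down to the nearest multiple of qos_lower_iops_limit.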
	iops_limit=$(((io_result / 4) / qos_lower_iops_limit * qos_lower_iops_limit))
	if [ $iops_limit -gt $qos_lower_iops_limit ]; then

		# Run bdevperf with IOPS rate limit on bdev 1
		$rpc_py bdev_set_qos_limit --rw_ios_per_sec $iops_limit $QOS_DEV_1
		run_test "bdev_qos_iops" run_qos_test $iops_limit IOPS $QOS_DEV_1

		# Run bdevperf with bandwidth rate limit on bdev 2
		# Set the bandwidth limit to 1/10 of the measured performance without QoS
		bw_limit=$(get_io_result BANDWIDTH $QOS_DEV_2)
		bw_limit=$((bw_limit / 1024 / 10))
		if [ $bw_limit -lt $qos_lower_bw_limit ]; then
			bw_limit=$qos_lower_bw_limit
		fi
		$rpc_py bdev_set_qos_limit --rw_mbytes_per_sec $bw_limit $QOS_DEV_2
		run_test "bdev_qos_bw" run_qos_test $bw_limit BANDWIDTH $QOS_DEV_2

		# Run bdevperf with additional read only bandwidth rate limit on bdev 1
		$rpc_py bdev_set_qos_limit --r_mbytes_per_sec $qos_lower_bw_limit $QOS_DEV_1
		run_test "bdev_qos_ro_bw" run_qos_test $qos_lower_bw_limit BANDWIDTH $QOS_DEV_1
	else
		echo "Actual IOPS without limiting is too low - exit testing"
	fi
}

function qos_test_suite() {
	# Run bdevperf with QoS disabled first
	"$testdir/bdevperf/bdevperf" -z -m 0x2 -q 256 -o 4096 -w randread -t 60 &
	QOS_PID=$!
	echo "Process qos testing pid: $QOS_PID"
	trap 'killprocess $QOS_PID; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $QOS_PID

	$rpc_py bdev_malloc_create -b $QOS_DEV_1 128 512
	waitforbdev $QOS_DEV_1
	$rpc_py bdev_null_create $QOS_DEV_2 128 512
	waitforbdev $QOS_DEV_2

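	# Start the bdevperf run in the background; qos_function_test first measures the
	# unthrottled rate and then applies and verifies the QoS limits while I/O is running.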
	$rootdir/test/bdev/bdevperf/bdevperf.py perform_tests &
	qos_function_test

	$rpc_py bdev_malloc_delete $QOS_DEV_1
	$rpc_py bdev_null_delete $QOS_DEV_2
	killprocess $QOS_PID
	trap - SIGINT SIGTERM EXIT
}

# Initial bdev creation and configuration
#-----------------------------------------------------
QOS_DEV_1="Malloc_0"
QOS_DEV_2="Null_1"
QOS_RUN_TIME=5

if [ $(uname -s) = Linux ]; then
	# Test dynamic memory management. All hugepages will be reserved at runtime
	PRE_RESERVED_MEM=0
else
	# Dynamic memory management is not supported on BSD
	PRE_RESERVED_MEM=2048
fi

test_type=${1:-bdev}
start_spdk_tgt
case "$test_type" in
	bdev)
		setup_bdev_conf
		;;
	nvme)
		setup_nvme_conf
		;;
	gpt)
		setup_gpt_conf
		;;
	crypto_aesni)
		setup_crypto_aesni_conf
		;;
	crypto_qat)
		setup_crypto_qat_conf
		;;
	pmem)
		setup_pmem_conf
		;;
	rbd)
		setup_rbd_conf
		;;
	*)
		echo "invalid test name"
		exit 1
		;;
esac

# Generate json config and use it throughout all the tests
cat <<- CONF > "$conf_file"
	        {"subsystems":[
	        $("$rpc_py" save_subsystem_config -n bdev)
	        ]}
CONF

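# Only bdevs that are not claimed by another bdev (e.g. the bases of the split, passthru
# and raid bdevs) are exercised by the tests below.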
bdevs=$("$rpc_py" bdev_get_bdevs | jq -r '.[] | select(.claimed == false)')
bdevs_name=$(echo $bdevs | jq -r '.name')
bdev_list=($bdevs_name)
hello_world_bdev=${bdev_list[0]}
trap - SIGINT SIGTERM EXIT
killprocess "$spdk_tgt_pid"
# End bdev configuration
#-----------------------------------------------------

run_test "bdev_hello_world" $SPDK_EXAMPLE_DIR/hello_bdev --json "$conf_file" -b "$hello_world_bdev"
run_test "bdev_bounds" bdev_bounds
run_test "bdev_nbd" nbd_function_test $conf_file "$bdevs_name"
if [[ $CONFIG_FIO_PLUGIN == y ]]; then
	if [ "$test_type" = "nvme" ] || [ "$test_type" = "gpt" ]; then
		# TODO: once we get real multi-ns drives, re-enable this test for NVMe.
		echo "skipping fio tests on NVMe due to multi-ns failures."
	else
		run_test "bdev_fio" fio_test_suite
	fi
else
	echo "FIO not available"
	exit 1
fi

run_test "bdev_verify" $testdir/bdevperf/bdevperf --json "$conf_file" -q 128 -o 4096 -w verify -t 5 -C -m 0x3
run_test "bdev_write_zeroes" $testdir/bdevperf/bdevperf --json "$conf_file" -q 128 -o 4096 -w write_zeroes -t 1

if [[ $test_type == bdev ]]; then
	run_test "bdev_qos" qos_test_suite
fi

# Temporarily disabled - infinite loop
# if [ $RUN_NIGHTLY -eq 1 ]; then
# run_test "bdev_reset" $testdir/bdevperf/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60
# fi

# Bdev and configuration cleanup below this line
#-----------------------------------------------------
if [ "$test_type" = "gpt" ]; then
	"$rootdir/scripts/setup.sh" reset
	sleep 1s
	if [[ -b $gpt_nvme ]]; then
		dd if=/dev/zero of="$gpt_nvme" bs=4096 count=8 oflag=direct
	fi
fi

cleanup