xref: /spdk/test/bdev/blockdev.sh (revision 99a43e75ed9ac3c87d23e3746173cf5a5a992544)
1#!/usr/bin/env bash
2#  SPDX-License-Identifier: BSD-3-Clause
3#  Copyright (C) 2016 Intel Corporation
4#  All rights reserved.
5#
6testdir=$(readlink -f $(dirname $0))
7rootdir=$(readlink -f $testdir/../..)
8source $rootdir/test/common/autotest_common.sh
9source $testdir/nbd_common.sh
10
11# nullglob will remove unmatched words containing '*', '?', '[' characters during word splitting.
12# This means that empty alias arrays will be removed instead of printing "[]", which breaks
13# consecutive "jq" calls, as the "aliases" key will have no value and the whole JSON will be
14# invalid. Hence do not enable this option for the duration of the tests in this script.
15shopt -s extglob
16
17rpc_py=rpc_cmd
18conf_file="$testdir/bdev.json"
19nonenclosed_conf_file="$testdir/nonenclosed.json"
20nonarray_conf_file="$testdir/nonarray.json"
21
22# Make sure the configuration is clean
23: > "$conf_file"
24
function cleanup() {
	# Remove artifacts shared by all test types.
	rm -f "$SPDK_TEST_STORAGE/aiofile"
	rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
	rm -f "$conf_file"

	# Per-backend teardown; the values are mutually exclusive.
	case "$test_type" in
		rbd)
			rbd_cleanup
			;;
		daos)
			daos_cleanup
			;;
		gpt)
			"$rootdir/scripts/setup.sh" reset
			if [[ -b $gpt_nvme ]]; then
				wipefs --all "$gpt_nvme"
			fi
			;;
		xnvme)
			"$rootdir/scripts/setup.sh"
			;;
	esac
}
48
function start_spdk_tgt() {
	# Launch the SPDK target in the background and block until its RPC socket
	# answers. Sets the global spdk_tgt_pid for later teardown.
	# NOTE(review): "$env_ctx" is passed even when empty, producing an empty
	# argv entry — presumably harmless to spdk_tgt; confirm.
	"$SPDK_BIN_DIR/spdk_tgt" "$env_ctx" &
	spdk_tgt_pid=$!
	# Kill the target on any interruption from this point onward.
	trap 'killprocess "$spdk_tgt_pid"; exit 1' SIGINT SIGTERM EXIT
	waitforlisten "$spdk_tgt_pid"
}
55
function setup_bdev_conf() {
	# Build the default "bdev" test configuration: malloc bdevs, splits, a
	# passthru, raid0/concat arrays and QoS limits — fed to the RPC client
	# as one batch over stdin (the heredoc's leading tabs are stripped).
	"$rpc_py" <<- RPC
		bdev_split_create Malloc1 2
		bdev_split_create -s 4 Malloc2 8
		bdev_malloc_create -b Malloc0 32 512
		bdev_malloc_create -b Malloc1 32 512
		bdev_malloc_create -b Malloc2 32 512
		bdev_malloc_create -b Malloc3 32 512
		bdev_malloc_create -b Malloc4 32 512
		bdev_malloc_create -b Malloc5 32 512
		bdev_malloc_create -b Malloc6 32 512
		bdev_malloc_create -b Malloc7 32 512
		bdev_passthru_create -p TestPT -b Malloc3
		bdev_raid_create -n raid0 -z 64 -r 0 -b "Malloc4 Malloc5"
		bdev_raid_create -n concat0 -z 64 -r concat -b "Malloc6 Malloc7"
		bdev_set_qos_limit --rw_mbytes_per_sec 100 Malloc3
		bdev_set_qos_limit --rw_ios_per_sec 20000 Malloc0
	RPC
	# AIO bdev backed by a plain file; skipped on FreeBSD.
	if [[ $(uname -s) != "FreeBSD" ]]; then
		dd if=/dev/zero of="$SPDK_TEST_STORAGE/aiofile" bs=2048 count=5000
		"$rpc_py" bdev_aio_create "$SPDK_TEST_STORAGE/aiofile" AIO0 2048
	fi
}
79
function setup_nvme_conf() {
	# Generate a bdev subsystem config for local NVMe controllers and load it.
	local json
	mapfile -t json < <("$rootdir/scripts/gen_nvme.sh")
	# NOTE(review): the nested single quotes around the JSON look intentional
	# for rpc_cmd's argument handling — confirm before changing.
	"$rpc_py" load_subsystem_config -j "'${json[*]}'"
}
85
function setup_xnvme_conf() {
	# TODO: Switch to io_uring_cmd when proper CI support is in place
	local io_mechanism=io_uring
	local nvme nvmes

	# Detach devices from SPDK drivers so kernel /dev/nvme* nodes appear.
	"$rootdir/scripts/setup.sh" reset
	get_zoned_devs

	for nvme in /dev/nvme*n*; do
		# Skip non-block nodes and zoned namespaces (zoned_devs is filled
		# by get_zoned_devs above).
		[[ -b $nvme && -z ${zoned_devs["${nvme##*/}"]} ]] || continue
		nvmes+=("bdev_xnvme_create $nvme ${nvme##*/} $io_mechanism")
	done

	# Fail the test if no usable nvme device was found.
	((${#nvmes[@]} > 0))
	"$rpc_py" < <(printf '%s\n' "${nvmes[@]}")
}
102
function setup_gpt_conf() {
	# Find an nvme device without a valid partition table, create a GPT with
	# two SPDK-typed partitions on it and register it via setup_nvme_conf().
	# On success the global gpt_nvme (consumed by cleanup()) holds the device;
	# returns 1 if no suitable device exists.
	local dev pt
	"$rootdir/scripts/setup.sh" reset
	get_zoned_devs
	# Get nvme devices by following drivers' links towards nvme class
	local nvme_devs=(/sys/bus/pci/drivers/nvme/*/nvme/nvme*/nvme*n*) nvme_dev
	gpt_nvme=""
	# Pick first device which doesn't have any valid partition table
	for nvme_dev in "${nvme_devs[@]}"; do
		[[ -z ${zoned_devs["${nvme_dev##*/}"]} ]] || continue
		dev=/dev/${nvme_dev##*/}
		if ! pt=$(parted "$dev" -ms print 2>&1); then
			# parted fails on unlabeled disks — exactly what we want, but make
			# sure the failure was not caused by something else.
			[[ $pt == *"$dev: unrecognised disk label"* ]] || continue
			gpt_nvme=$dev
			break
		fi
	done
	if [[ -n $gpt_nvme ]]; then
		# Create gpt partition table
		parted -s "$gpt_nvme" mklabel gpt mkpart SPDK_TEST_first '0%' '50%' mkpart SPDK_TEST_second '50%' '100%'
		# change the GUID to SPDK GUID value
		SPDK_GPT_GUID=$(get_spdk_gpt)
		sgdisk -t "1:$SPDK_GPT_GUID" "$gpt_nvme"
		sgdisk -t "2:$SPDK_GPT_GUID" "$gpt_nvme"
		# Rebind the device to SPDK and register it as an nvme bdev.
		"$rootdir/scripts/setup.sh"
		"$rpc_py" bdev_get_bdevs
		setup_nvme_conf
	else
		printf 'Did not find any nvme block devices to work with, aborting the test\n' >&2
		"$rootdir/scripts/setup.sh"
		return 1
	fi
}
135
function setup_crypto_aesni_conf() {
	# Malloc0 and Malloc1 use AESNI
	local rpc_calls=(
		"bdev_malloc_create -b Malloc0 16 512"
		"bdev_malloc_create -b Malloc1 16 512"
		"bdev_crypto_create Malloc0 crypto_ram crypto_aesni_mb 01234567891234560123456789123456"
		"bdev_crypto_create Malloc1 crypto_ram2 crypto_aesni_mb 90123456789123459012345678912345"
	)
	# Feed the whole batch to the RPC client over stdin, one call per line.
	"$rpc_py" < <(printf '%s\n' "${rpc_calls[@]}")
}
145
function setup_crypto_qat_conf() {
	# Malloc0 will use QAT AES_CBC
	# Malloc1 will use QAT AES_XTS
	local rpc_calls=(
		"bdev_malloc_create -b Malloc0 16 512"
		"bdev_malloc_create -b Malloc1 16 512"
		"bdev_crypto_create Malloc0 crypto_ram crypto_qat 01234567891234560123456789123456"
		"bdev_crypto_create -c AES_XTS -k2 01234567891234560123456789123456 Malloc1 crypto_ram3 crypto_qat 01234567891234560123456789123456"
		"bdev_get_bdevs -b Malloc1"
	)
	# Feed the whole batch to the RPC client over stdin, one call per line.
	"$rpc_py" < <(printf '%s\n' "${rpc_calls[@]}")
}
157
function setup_crypto_mlx5_conf() {
	# Split a hexlified AES_XTS DEK ($1) into the block key ('key') and the
	# tweak key ('key2') and create an mlx5-backed crypto bdev over Malloc0.
	# Returns 1 on an unsupported key length.
	local key=$1
	local block_key
	local tweak_key
	if ((${#key} == 96)); then
		# 96 hex chars is 64 + 32 - AES_XTS_256 in hexlified format.
		# The first 64 chars are the block key: 32 bytes, i.e. 256 bits.
		block_key=${key:0:64}
		# The remaining 32 chars are passed as the 'key2' (tweak key).
		tweak_key=${key:64:32}
	elif ((${#key} == 160)); then
		# 160 hex chars is 128 + 32 - AES_XTS_512 in hexlified format.
		# The first 128 chars are the block key: 64 bytes, i.e. 512 bits.
		block_key=${key:0:128}
		# The remaining 32 chars are passed as the 'key2' (tweak key).
		tweak_key=${key:128:32}
	else
		echo "ERROR: Invalid DEK size for MLX5 crypto setup: ${#key}"
		echo "ERROR: Supported key sizes for MLX5: 96 bytes (AES_XTS_256) and 160 bytes (AES_XTS_512)."
		return 1
	fi

	# Malloc0 will use MLX5 AES_XTS
	"$rpc_py" <<- RPC
		bdev_malloc_create -b Malloc0 16 512
		bdev_crypto_create -c AES_XTS -k2 $tweak_key Malloc0 crypto_ram4 mlx5_pci $block_key
		bdev_get_bdevs -b Malloc0
	RPC
}
189
function setup_pmem_conf() {
	# Requires the pmempool utility; fail the setup when it is missing.
	hash pmempool || return 1
	# Recreate the pool file from scratch and expose it as a pmem bdev.
	rm -f "$SPDK_TEST_STORAGE/spdk-pmem-pool"
	pmempool create blk --size=32M 512 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
	"$rpc_py" bdev_pmem_create -n Pmem0 "$SPDK_TEST_STORAGE/spdk-pmem-pool"
}
199
function setup_rbd_conf() {
	# Bring up a local ceph/rbd environment (rbd_setup is a sourced helper)
	# and expose the "foo" image from the "rbd" pool as a bdev.
	timing_enter rbd_setup
	rbd_setup 127.0.0.1
	timing_exit rbd_setup

	"$rpc_py" bdev_rbd_create -b Ceph0 rbd foo 512
}
207
function setup_daos_conf() {
	# Provision a DAOS pool/container pair and expose it as a 16 GiB bdev
	# with a 4k block size.
	local daos_pool=testpool
	local daos_cont=testcont

	timing_enter daos_setup
	daos_setup $daos_pool $daos_cont
	timing_exit daos_setup

	"$rpc_py" bdev_daos_create Daos0 $daos_pool $daos_cont 16 4096
}
218
function setup_raid5f_conf() {
	# Three malloc bdevs combined into a raid5f array (2k strip size).
	local rpc_calls=(
		"bdev_malloc_create -b Malloc0 32 512"
		"bdev_malloc_create -b Malloc1 32 512"
		"bdev_malloc_create -b Malloc2 32 512"
		'bdev_raid_create -n raid5f -z 2 -r 5f -b "Malloc0 Malloc1 Malloc2"'
	)
	# Feed the whole batch to the RPC client over stdin, one call per line.
	"$rpc_py" < <(printf '%s\n' "${rpc_calls[@]}")
}
227
function bdev_bounds() {
	# Run the bdevio app against every bdev in $conf_file and drive its I/O
	# boundary tests through the companion tests.py RPC script.
	$testdir/bdevio/bdevio -w -s $PRE_RESERVED_MEM --json "$conf_file" "$env_ctx" &
	bdevio_pid=$!
	# Tear down bdevio and the global config on interruption.
	trap 'cleanup; killprocess $bdevio_pid; exit 1' SIGINT SIGTERM EXIT
	echo "Process bdevio pid: $bdevio_pid"
	waitforlisten $bdevio_pid
	$testdir/bdevio/tests.py perform_tests
	killprocess $bdevio_pid
	trap - SIGINT SIGTERM EXIT
}
238
function nbd_function_test() {
	# Exercise bdevs through the kernel nbd driver.
	# $1 - json config file, $2 - whitespace-separated bdev names.
	# Linux-only; silently skipped elsewhere.
	[[ $(uname -s) == Linux ]] || return 0

	local rpc_server=/var/tmp/spdk-nbd.sock
	local conf=$1
	local bdev_all=($2) # intentional word-splitting of the name list
	local bdev_num=${#bdev_all[@]}

	# FIXME: Centos7 in the CI is not shipped with a kernel supporting BLK_DEV_NBD
	# so don't fail here for now.
	[[ -e /sys/module/nbd ]] || modprobe -q nbd nbds_max=$bdev_num || return 0

	# +([0-9]) requires extglob, enabled at the top of the script.
	local nbd_all=(/dev/nbd+([0-9]))
	# Cap the number of bdevs under test by the number of available nbd nodes.
	bdev_num=$((${#nbd_all[@]} < bdev_num ? ${#nbd_all[@]} : bdev_num))

	local nbd_list=(${nbd_all[@]::bdev_num})
	local bdev_list=(${bdev_all[@]::bdev_num})

	$rootdir/test/app/bdev_svc/bdev_svc -r $rpc_server -i 0 --json "$conf" "$env_ctx" &
	nbd_pid=$!
	trap 'cleanup; killprocess $nbd_pid' SIGINT SIGTERM EXIT
	waitforlisten $nbd_pid $rpc_server

	# Helpers from nbd_common.sh, sourced at the top of the script.
	nbd_rpc_start_stop_verify $rpc_server "${bdev_list[*]}"
	nbd_rpc_data_verify $rpc_server "${bdev_list[*]}" "${nbd_list[*]}"
	nbd_with_lvol_verify $rpc_server "${nbd_list[*]}"

	killprocess $nbd_pid
	trap - SIGINT SIGTERM EXIT
}
269
function fio_test_suite() {
	# Run the fio bdev plugin twice: a read/write verify pass over all
	# unclaimed bdevs, then a trim pass over those that support unmap.
	local env_context
	local unmap_bdevs

	# Make sure that state files and anything else produced by fio test will
	# stay at the testdir.
	pushd $testdir
	trap 'rm -f ./*.state; popd; exit 1' SIGINT SIGTERM EXIT

	# Generate the fio config file given the list of all unclaimed bdevs
	env_context=$(echo "$env_ctx" | sed 's/--env-context=//')
	fio_config_gen $testdir/bdev.fio verify AIO "$env_context"
	for b in $(echo $bdevs | jq -r '.name'); do
		echo "[job_$b]" >> $testdir/bdev.fio
		echo "filename=$b" >> $testdir/bdev.fio
	done

	# Deliberately unquoted at the call sites below so it word-splits into
	# separate fio arguments.
	local fio_params="--ioengine=spdk_bdev --iodepth=8 --bs=4k --runtime=10 $testdir/bdev.fio \
			--verify_state_save=0 --spdk_json_conf=$conf_file"

	run_test "bdev_fio_rw_verify" fio_bdev $fio_params --spdk_mem=$PRE_RESERVED_MEM --aux-path=$output_dir
	rm -f ./*.state
	rm -f $testdir/bdev.fio

	# Generate the fio config file given the list of all unclaimed bdevs that
	# support unmap. Evaluate the jq filter once instead of twice.
	unmap_bdevs=$(echo $bdevs | jq -r 'select(.supported_io_types.unmap == true) | .name')
	fio_config_gen $testdir/bdev.fio trim "" "$env_context"
	if [ "$unmap_bdevs" != "" ]; then
		for b in $unmap_bdevs; do
			echo "[job_$b]" >> $testdir/bdev.fio
			echo "filename=$b" >> $testdir/bdev.fio
		done
	else
		# Nothing supports unmap — skip the trim pass entirely.
		rm -f $testdir/bdev.fio
		popd
		trap - SIGINT SIGTERM EXIT
		return 0
	fi

	run_test "bdev_fio_trim" fio_bdev $fio_params --verify_state_save=0 --aux-path=$output_dir
	rm -f ./*.state
	rm -f $testdir/bdev.fio
	popd
	trap - SIGINT SIGTERM EXIT
}
313
function get_io_result() {
	# Sample iostat for $QOS_RUN_TIME seconds and print the integer part of
	# the requested column (IOPS or BANDWIDTH) for the given device.
	local limit_type=$1
	local qos_dev=$2
	local iostat_result
	iostat_result=$($rootdir/scripts/iostat.py -d -i 1 -t $QOS_RUN_TIME | grep $qos_dev | tail -1)
	case $limit_type in
		IOPS) iostat_result=$(awk '{print $2}' <<< $iostat_result) ;;
		BANDWIDTH) iostat_result=$(awk '{print $6}' <<< $iostat_result) ;;
	esac

	# Strip everything from the first '.' to drop the fractional part.
	echo ${iostat_result/.*/}
}
327
function run_qos_test() {
	# Verify the measured IO rate stays within +/-10% of the configured QoS
	# limit ($1), for limit type $2 (IOPS/BANDWIDTH) on device $3.
	# Exits the whole script on failure.
	local qos_limit=$1
	local qos_result=0

	qos_result=$(get_io_result $2 $3)
	# Bandwidth limits are configured in MB/s; scale by 1024 to match the
	# units reported by get_io_result (presumably kB/s — confirm).
	if [ $2 = BANDWIDTH ]; then
		qos_limit=$((qos_limit * 1024))
	fi
	lower_limit=$((qos_limit * 9 / 10))
	upper_limit=$((qos_limit * 11 / 10))

	# QoS realization is related with bytes transferred. It currently has some variation.
	if [ $qos_result -lt $lower_limit ] || [ $qos_result -gt $upper_limit ]; then
		echo "Failed to limit the io read rate of NULL bdev by qos"
		$rpc_py bdev_malloc_delete $QOS_DEV_1
		$rpc_py bdev_null_delete $QOS_DEV_2
		killprocess $QOS_PID
		exit 1
	fi
}
348
function qos_function_test() {
	# Measure unthrottled IOPS first, then derive and verify IOPS, bandwidth
	# and read-only bandwidth QoS limits from that baseline.
	local qos_lower_iops_limit=1000
	local qos_lower_bw_limit=2
	local io_result=0
	local iops_limit=0
	local bw_limit=0

	io_result=$(get_io_result IOPS $QOS_DEV_1)
	# Set the IOPS limit as one quarter of the measured performance without QoS,
	# rounded down to a multiple of qos_lower_iops_limit.
	iops_limit=$(((io_result / 4) / qos_lower_iops_limit * qos_lower_iops_limit))
	if [ $iops_limit -gt $qos_lower_iops_limit ]; then

		# Run bdevperf with IOPS rate limit on bdev 1
		$rpc_py bdev_set_qos_limit --rw_ios_per_sec $iops_limit $QOS_DEV_1
		run_test "bdev_qos_iops" run_qos_test $iops_limit IOPS $QOS_DEV_1

		# Run bdevperf with bandwidth rate limit on bdev 2
		# Set the bandwidth limit as 1/10 of the measure performance without QoS
		bw_limit=$(get_io_result BANDWIDTH $QOS_DEV_2)
		bw_limit=$((bw_limit / 1024 / 10))
		# Never go below the minimum supported bandwidth limit.
		if [ $bw_limit -lt $qos_lower_bw_limit ]; then
			bw_limit=$qos_lower_bw_limit
		fi
		$rpc_py bdev_set_qos_limit --rw_mbytes_per_sec $bw_limit $QOS_DEV_2
		run_test "bdev_qos_bw" run_qos_test $bw_limit BANDWIDTH $QOS_DEV_2

		# Run bdevperf with additional read only bandwidth rate limit on bdev 1
		$rpc_py bdev_set_qos_limit --r_mbytes_per_sec $qos_lower_bw_limit $QOS_DEV_1
		run_test "bdev_qos_ro_bw" run_qos_test $qos_lower_bw_limit BANDWIDTH $QOS_DEV_1
	else
		echo "Actual IOPS without limiting is too low - exit testing"
	fi
}
382
function qos_test_suite() {
	# Run bdevperf with QoS disabled first
	"$rootdir/build/examples/bdevperf" -z -m 0x2 -q 256 -o 4096 -w randread -t 60 "$env_ctx" &
	QOS_PID=$!
	echo "Process qos testing pid: $QOS_PID"
	trap 'cleanup; killprocess $QOS_PID; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $QOS_PID

	# One malloc and one null bdev serve as the QoS targets.
	$rpc_py bdev_malloc_create -b $QOS_DEV_1 128 512
	waitforbdev $QOS_DEV_1
	$rpc_py bdev_null_create $QOS_DEV_2 128 512
	waitforbdev $QOS_DEV_2

	# Kick off the I/O load in the background while the QoS checks run.
	$rootdir/examples/bdev/bdevperf/bdevperf.py perform_tests &
	qos_function_test

	$rpc_py bdev_malloc_delete $QOS_DEV_1
	$rpc_py bdev_null_delete $QOS_DEV_2
	killprocess $QOS_PID
	trap - SIGINT SIGTERM EXIT
}
404
function error_test_suite() {
	# Verify the error-injection bdev: first with bdevperf continuing on
	# error (-f), then expecting bdevperf to exit on error.
	DEV_1="Dev_1"
	DEV_2="Dev_2"
	ERR_DEV="EE_Dev_1"

	# Run bdevperf with 1 normal bdev and 1 error bdev, also continue on error
	"$rootdir/build/examples/bdevperf" -z -m 0x2 -q 16 -o 4096 -w randread -t 5 -f "$env_ctx" &
	ERR_PID=$!
	echo "Process error testing pid: $ERR_PID"
	waitforlisten $ERR_PID

	$rpc_py bdev_malloc_create -b $DEV_1 128 512
	waitforbdev $DEV_1
	$rpc_py bdev_error_create $DEV_1
	$rpc_py bdev_malloc_create -b $DEV_2 128 512
	waitforbdev $DEV_2
	$rpc_py bdev_error_inject_error $ERR_DEV 'all' 'failure' -n 5

	$rootdir/examples/bdev/bdevperf/bdevperf.py -t 1 perform_tests &
	sleep 1

	# Bdevperf is expected to be there as the continue on error is set
	if kill -0 $ERR_PID; then
		echo "Process is still up as continue on error is set. Pid: $ERR_PID"
	else
		echo "Process exited unexpectedly. Pid: $ERR_PID"
		exit 1
	fi

	# Delete the error devices
	$rpc_py bdev_error_delete $ERR_DEV
	$rpc_py bdev_malloc_delete $DEV_1
	sleep 5
	# Expected to exit normally
	killprocess $ERR_PID

	# Run bdevperf with 1 normal bdev and 1 error bdev, and exit on error
	"$rootdir/build/examples/bdevperf" -z -m 0x2 -q 16 -o 4096 -w randread -t 5 "$env_ctx" &
	ERR_PID=$!
	echo "Process error testing pid: $ERR_PID"
	waitforlisten $ERR_PID

	$rpc_py bdev_malloc_create -b $DEV_1 128 512
	waitforbdev $DEV_1
	$rpc_py bdev_error_create $DEV_1
	$rpc_py bdev_malloc_create -b $DEV_2 128 512
	waitforbdev $DEV_2
	$rpc_py bdev_error_inject_error $ERR_DEV 'all' 'failure' -n 5

	# Without -f the injected errors must take bdevperf down; NOT inverts the
	# expected non-zero exit status of wait.
	$rootdir/examples/bdev/bdevperf/bdevperf.py -t 1 perform_tests &
	NOT wait $ERR_PID
}
457
function qd_sampling_function_test() {
	# Enable queue-depth sampling on bdev $1 and confirm the configured
	# period is reflected in bdev_get_iostat output.
	# Exits the whole script on mismatch.
	local bdev_name=$1
	local sampling_period=10
	local iostats

	$rpc_py bdev_set_qd_sampling_period $bdev_name $sampling_period

	iostats=$($rpc_py bdev_get_iostat -b $bdev_name)

	qd_sampling_period=$(jq -r '.bdevs[0].queue_depth_polling_period' <<< "$iostats")

	# jq prints "null" when the key is absent; guard against that first.
	if [ $qd_sampling_period == null ] || [ $qd_sampling_period -ne $sampling_period ]; then
		echo "Queue depth polling period is not right"
		$rpc_py bdev_malloc_delete $QD_DEV
		killprocess $QD_PID
		exit 1
	fi
}
476
function qd_sampling_test_suite() {
	QD_DEV="Malloc_QD"

	# Background bdevperf instance used as the I/O generator for the test.
	"$rootdir/build/examples/bdevperf" -z -m 0x3 -q 256 -o 4096 -w randread -t 5 -C "$env_ctx" &
	QD_PID=$!
	echo "Process bdev QD sampling period testing pid: $QD_PID"
	trap 'cleanup; killprocess $QD_PID; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $QD_PID

	$rpc_py bdev_malloc_create -b $QD_DEV 128 512
	waitforbdev $QD_DEV

	# Start the I/O load, give it time to ramp up, then run the check.
	$rootdir/examples/bdev/bdevperf/bdevperf.py perform_tests &
	sleep 2
	qd_sampling_function_test $QD_DEV

	$rpc_py bdev_malloc_delete $QD_DEV
	killprocess $QD_PID
	trap - SIGINT SIGTERM EXIT
}
497
function stat_function_test() {
	# Verify that the per-channel read counters (bdev_get_iostat -c) are
	# consistent with the aggregate counters sampled just before and after.
	# Exits the whole script on failure.
	local bdev_name=$1
	local iostats
	local io_count1
	local io_count2
	local iostats_per_channel
	local io_count_per_channel1
	local io_count_per_channel2
	local io_count_per_channel_all=0

	iostats=$($rpc_py bdev_get_iostat -b $bdev_name)
	io_count1=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats")

	# Sum the read ops of the two channels (bdevperf runs on two cores).
	iostats_per_channel=$($rpc_py bdev_get_iostat -b $bdev_name -c)
	io_count_per_channel1=$(jq -r '.channels[0].num_read_ops' <<< "$iostats_per_channel")
	io_count_per_channel_all=$((io_count_per_channel_all + io_count_per_channel1))
	io_count_per_channel2=$(jq -r '.channels[1].num_read_ops' <<< "$iostats_per_channel")
	io_count_per_channel_all=$((io_count_per_channel_all + io_count_per_channel2))

	iostats=$($rpc_py bdev_get_iostat -b $bdev_name)
	io_count2=$(jq -r '.bdevs[0].num_read_ops' <<< "$iostats")

	# There is little time passed between the three iostats collected. So that
	# the accumulated statistics from per channel data shall be bigger than the
	# the first run and smaller than the third run in this short time of period.
	if [ $io_count_per_channel_all -lt $io_count1 ] || [ $io_count_per_channel_all -gt $io_count2 ]; then
		echo "Failed to collect the per Core IO statistics"
		$rpc_py bdev_malloc_delete $STAT_DEV
		killprocess $STAT_PID
		exit 1
	fi
}
530
function stat_test_suite() {
	STAT_DEV="Malloc_STAT"

	# Run bdevperf with 2 cores so as to collect per Core IO statistics
	"$rootdir/build/examples/bdevperf" -z -m 0x3 -q 256 -o 4096 -w randread -t 10 -C "$env_ctx" &
	STAT_PID=$!
	echo "Process Bdev IO statistics testing pid: $STAT_PID"
	trap 'cleanup; killprocess $STAT_PID; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $STAT_PID

	$rpc_py bdev_malloc_create -b $STAT_DEV 128 512
	waitforbdev $STAT_DEV

	# Start the I/O load, give it time to ramp up, then run the check.
	$rootdir/examples/bdev/bdevperf/bdevperf.py perform_tests &
	sleep 2
	stat_function_test $STAT_DEV

	$rpc_py bdev_malloc_delete $STAT_DEV
	killprocess $STAT_PID
	trap - SIGINT SIGTERM EXIT
}
552
# Initial bdev creation and configuration
#-----------------------------------------------------
# Device names and sampling time shared by the QoS test suite.
QOS_DEV_1="Malloc_0"
QOS_DEV_2="Null_1"
QOS_RUN_TIME=5

if [ $(uname -s) = Linux ]; then
	# Test dynamic memory management. All hugepages will be reserved at runtime
	PRE_RESERVED_MEM=0
else
	# Dynamic memory management is not supported on BSD
	PRE_RESERVED_MEM=2048
fi
566
# Positional arguments: test type (defaults to "bdev"), crypto device,
# crypto credentials file, and a hexlified DEK for the crypto_mlx5 tests.
test_type=${1:-bdev}
crypto_device=$2
wcs_file=$3
dek=$4
env_ctx=""
if [ -n "$crypto_device" ] && [ -n "$wcs_file" ]; then
	# We need full path here since fio perf test does 'pushd' to the test dir
	# and crypto login of fio plugin test can fail.
	wcs_file=$(readlink -f $wcs_file)
	if [ -f $wcs_file ]; then
		env_ctx="--env-context=--allow=$crypto_device,class=crypto,wcs_file=$wcs_file"
	else
		echo "ERROR: Credentials file $3 is not found!"
		exit 1
	fi
fi
start_spdk_tgt
# Dispatch to the per-backend setup routine for the requested test type.
case "$test_type" in
	bdev)
		setup_bdev_conf
		;;
	nvme)
		setup_nvme_conf
		;;
	gpt)
		setup_gpt_conf
		;;
	crypto_aesni)
		setup_crypto_aesni_conf
		;;
	crypto_qat)
		setup_crypto_qat_conf
		;;
	crypto_mlx5)
		setup_crypto_mlx5_conf $dek
		;;
	pmem)
		setup_pmem_conf
		;;
	rbd)
		setup_rbd_conf
		;;
	daos)
		setup_daos_conf
		;;
	raid5f)
		setup_raid5f_conf
		;;
	xnvme)
		setup_xnvme_conf
		;;
	*)
		echo "invalid test name"
		exit 1
		;;
esac
623
624"$rpc_py" bdev_wait_for_examine
625
626# Generate json config and use it throughout all the tests
627cat <<- CONF > "$conf_file"
628	        {"subsystems":[
629	        $("$rpc_py" save_subsystem_config -n bdev)
630	        ]}
631CONF
632
633bdevs=$("$rpc_py" bdev_get_bdevs | jq -r '.[] | select(.claimed == false)')
634bdevs_name=$(echo $bdevs | jq -r '.name')
635bdev_list=($bdevs_name)
636
637hello_world_bdev=${bdev_list[0]}
638trap - SIGINT SIGTERM EXIT
639killprocess "$spdk_tgt_pid"
640# End bdev configuration
641#-----------------------------------------------------
642
trap "cleanup" SIGINT SIGTERM EXIT

run_test "bdev_hello_world" $SPDK_EXAMPLE_DIR/hello_bdev --json "$conf_file" -b "$hello_world_bdev" "$env_ctx"
run_test "bdev_bounds" bdev_bounds "$env_ctx"
run_test "bdev_nbd" nbd_function_test $conf_file "$bdevs_name" "$env_ctx"
if [[ $CONFIG_FIO_PLUGIN == y ]]; then
	if [ "$test_type" = "nvme" ] || [ "$test_type" = "gpt" ]; then
		# TODO: once we get real multi-ns drives, re-enable this test for NVMe.
		echo "skipping fio tests on NVMe due to multi-ns failures."
	else
		run_test "bdev_fio" fio_test_suite "$env_ctx"
	fi
else
	echo "FIO not available"
	exit 1
fi

trap "cleanup" SIGINT SIGTERM EXIT

run_test "bdev_verify" $rootdir/build/examples/bdevperf --json "$conf_file" -q 128 -o 4096 -w verify -t 5 -C -m 0x3 "$env_ctx"
run_test "bdev_write_zeroes" $rootdir/build/examples/bdevperf --json "$conf_file" -q 128 -o 4096 -w write_zeroes -t 1 "$env_ctx"

# test json config not enclosed with {}
run_test "bdev_json_nonenclosed" $rootdir/build/examples/bdevperf --json "$nonenclosed_conf_file" -q 128 -o 4096 -w write_zeroes -t 1 "$env_ctx" || true

# test json config "subsystems" not with array
run_test "bdev_json_nonarray" $rootdir/build/examples/bdevperf --json "$nonarray_conf_file" -q 128 -o 4096 -w write_zeroes -t 1 "$env_ctx" || true

# These suites run only against the malloc-based "bdev" configuration.
if [[ $test_type == bdev ]]; then
	run_test "bdev_qos" qos_test_suite "$env_ctx"
	run_test "bdev_qd_sampling" qd_sampling_test_suite "$env_ctx"
	run_test "bdev_error" error_test_suite "$env_ctx"
	run_test "bdev_stat" stat_test_suite "$env_ctx"
fi

# Temporarily disabled - infinite loop
# if [ $RUN_NIGHTLY -eq 1 ]; then
# run_test "bdev_reset" $rootdir/build/examples/bdevperf --json "$conf_file" -q 16 -w reset -o 4096 -t 60 "$env_ctx"
# fi

# Bdev and configuration cleanup below this line
#-----------------------------------------------------

trap - SIGINT SIGTERM EXIT
cleanup
688