# xref: /spdk/autotest.sh (revision d987d777d6b8ce05f11cb1d90f1241bfecfc9af4)
#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2015 Intel Corporation
#  All rights reserved.
#

# Resolve the repository root from this script's own location.
# Quote the inner substitution so paths containing spaces survive (SC2086).
rootdir=$(readlink -f "$(dirname "$0")")

# In autotest_common.sh all tests are disabled by default.
# If the configuration of tests is not provided, no tests will be carried out.
if [[ ! -f $1 ]]; then
	echo "ERROR: SPDK test configuration not specified"
	exit 1
fi

# Autotest.sh, as part of autorun.sh, runs in a different
# shell process than autobuild.sh. Use helper file to pass
# over env variable containing libraries paths.
if [[ -e /tmp/spdk-ld-path ]]; then
	source /tmp/spdk-ld-path
fi

# $1 is the test configuration file (sets the SPDK_TEST_* flags).
source "$1"
source "$rootdir/test/common/autotest_common.sh"
source "$rootdir/test/nvmf/common.sh"

# Device resets and /proc/sys writes below require root privileges.
if ((EUID != 0)); then
	echo "$0 must be run as root"
	exit 1
fi
31
# Use [[ ]] so the unquoted $(uname -s) cannot word-split (and to match the
# style used for the same check later in this script).
if [[ $(uname -s) == Linux ]]; then
	# NOTE(review): old_core_pattern is not referenced again in this file —
	# presumably restored by cleanup code in autotest_common.sh; confirm.
	old_core_pattern=$(< /proc/sys/kernel/core_pattern)
	mkdir -p "$output_dir/coredumps"
	# Set core_pattern to a known value to avoid ABRT, systemd-coredump, etc.
	# Dump the $output_dir path to a file so collector can pick it up while executing.
	# We don't set it in the core_pattern command line because of the string length limitation
	# of 128 bytes. See 'man core 5' for details.
	echo "|$rootdir/scripts/core-collector.sh %P %s %t" > /proc/sys/kernel/core_pattern
	echo "$output_dir/coredumps" > "$rootdir/.coredump_path"

	# make sure nbd (network block device) driver is loaded if it is available
	# this ensures that when tests need to use nbd, it will be fully initialized
	modprobe nbd || true

	# Record udev events in the background to help debug device (dis)appearance.
	if udevadm=$(type -P udevadm); then
		"$udevadm" monitor --property &> "$output_dir/udev.log" &
		udevadm_pid=$!
	fi

fi
52
start_monitor_resources

# On interrupt/termination or any premature exit, attempt cleanup but always
# exit non-zero ('|| :' keeps the trap itself from aborting mid-cleanup).
trap "autotest_cleanup || :; exit 1" SIGINT SIGTERM EXIT

timing_enter autotest

create_test_list

# $src duplicates $rootdir (both resolve this script's directory); quote the
# expansions so the path survives whitespace (SC2086).
src=$(readlink -f "$(dirname "$0")")
out=$output_dir
cd "$src"

# FreeBSD-specific helpers from autotest_common.sh — presumably no-ops on
# other platforms; confirm against their definitions.
freebsd_update_contigmem_mod
freebsd_set_maxsock_buf
67
if [[ $CONFIG_COVERAGE == y ]]; then
	export LCOV_OPTS="
		--rc lcov_branch_coverage=1
		--rc lcov_function_coverage=1
		--rc genhtml_branch_coverage=1
		--rc genhtml_function_coverage=1
		--rc genhtml_legend=1
		--rc geninfo_all_blocks=1
		$lcov_opt
		"
	# $LCOV is deliberately expanded unquoted at call sites so the embedded
	# options word-split into separate arguments.
	export LCOV="lcov $LCOV_OPTS --no-external"
	# Print lcov version to log
	$LCOV -v
	# zero out coverage data (paths quoted: SC2086)
	$LCOV -q -c -i -t "Baseline" -d "$src" -o "$out/cov_base.info"
fi
84
# Make sure the disks are clean (no leftover partition tables)
timing_enter pre_cleanup
# Remove old domain socket pathname just in case
rm -f /var/tmp/spdk*.sock

# Load the kernel driver
$rootdir/scripts/setup.sh reset

# Populates the zoned_devs associative array (helper comes from the sourced
# nvmf/common.sh — keys appear to be bare device names; see the lookup below).
get_zoned_devs

if ((${#zoned_devs[@]} > 0)); then
	# FIXME: For now make sure zoned devices are tested on-demand by
	# a designated tests instead of falling into any other. The main
	# concern here are fio workloads where specific configuration
	# must be in place for it to work with the zoned device.
	export PCI_BLOCKED="${zoned_devs[*]}"
	export PCI_ZONED="${zoned_devs[*]}"
fi

# Delete all leftover lvols and gpt partitions
# Matches both /dev/nvmeXnY on Linux and /dev/nvmeXnsY on BSD
# Filter out nvme with partitions - the "p*" suffix
# NOTE(review): the !(*p*) pattern needs extglob — presumably enabled by
# autotest_common.sh; confirm.
for dev in /dev/nvme*n!(*p*); do
	# Skip zoned devices as non-sequential IO will always fail
	[[ -z ${zoned_devs["${dev##*/}"]} ]] || continue
	if ! block_in_use "$dev"; then
		# Zero the first MiB, which is enough to clobber partition tables
		# and lvol/gpt metadata on the namespace.
		dd if=/dev/zero of="$dev" bs=1M count=1
	fi
done

sync

if ! xtrace_disable_per_cmd reap_spdk_processes; then
	echo "WARNING: Lingering SPDK processes were detected. Testing environment may be unstable" >&2
fi

if [[ $(uname -s) == Linux && $SPDK_TEST_SETUP -eq 1 ]]; then
	run_test "setup.sh" "$rootdir/test/setup/test-setup.sh"
fi

$rootdir/scripts/setup.sh status

if [[ $(uname -s) == Linux ]]; then
	# Revert NVMe namespaces to default state
	nvme_namespace_revert
fi

timing_exit pre_cleanup
133
# Allocate huge pages for the functional tests that follow.
timing_enter afterboot
$rootdir/scripts/setup.sh
timing_exit afterboot

# Revert existing OPAL to factory settings that may have been left from earlier failed tests.
# This ensures we won't hit any unexpected failures due to NVMe SSDs being locked.
opal_revert_cleanup

#####################
# Unit Tests
#####################

if [[ $SPDK_TEST_UNITTEST -eq 1 ]]; then
	run_test "unittest" $rootdir/test/unit/unittest.sh
fi
150
if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
	# Crypto and compress bdev tests need the QAT device configured up front.
	if [[ $SPDK_TEST_CRYPTO -eq 1 || $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]]; then
		if [[ $SPDK_TEST_USE_IGB_UIO -eq 1 ]]; then
			$rootdir/scripts/qat_setup.sh igb_uio
		else
			$rootdir/scripts/qat_setup.sh
		fi
	fi
	timing_enter lib

	# Make uring the default socket implementation for all tests in this run.
	if [[ $SPDK_TEST_URING -eq 1 ]]; then
		export SPDK_SOCK_IMPL_DEFAULT="uring"
	fi

	# Baseline tests that run unconditionally for every functional job.
	run_test "env" $rootdir/test/env/env.sh
	run_test "rpc" $rootdir/test/rpc/rpc.sh
	run_test "skip_rpc" $rootdir/test/rpc/skip_rpc.sh
	run_test "rpc_client" $rootdir/test/rpc_client/rpc_client.sh
	run_test "json_config" $rootdir/test/json_config/json_config.sh
	run_test "json_config_extra_key" $rootdir/test/json_config/json_config_extra_key.sh
	run_test "alias_rpc" $rootdir/test/json_config/alias_rpc/alias_rpc.sh

	if [[ $SPDK_JSONRPC_GO_CLIENT -eq 0 ]]; then
		run_test "spdkcli_tcp" $rootdir/test/spdkcli/tcp.sh
	fi

	run_test "dpdk_mem_utility" $rootdir/test/dpdk_memory_utility/test_dpdk_mem_info.sh
	run_test "event" $rootdir/test/event/event.sh
	run_test "thread" $rootdir/test/thread/thread.sh

	if [[ $SPDK_TEST_ACCEL -eq 1 ]]; then
		run_test "accel" $rootdir/test/accel/accel.sh
		run_test "accel_rpc" $rootdir/test/accel/accel_rpc.sh
	fi

	run_test "app_cmdline" $rootdir/test/app/cmdline.sh
	run_test "version" $rootdir/test/app/version.sh

	if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
		run_test "blockdev_general" $rootdir/test/bdev/blockdev.sh
		run_test "bdevperf_config" $rootdir/test/bdev/bdevperf/test_config.sh
		# Interrupt-mode tests depend on Linux-only facilities.
		if [[ $(uname -s) == Linux ]]; then
			run_test "reactor_set_interrupt" $rootdir/test/interrupt/reactor_set_interrupt.sh
			run_test "reap_unregistered_poller" $rootdir/test/interrupt/reap_unregistered_poller.sh
		fi
	fi

	if [[ $SPDK_TEST_RAID -eq 1 ]]; then
		run_test "bdev_raid" $rootdir/test/bdev/bdev_raid.sh
		run_test "spdkcli_raid" $rootdir/test/spdkcli/raid.sh
		run_test "blockdev_raid5f" $rootdir/test/bdev/blockdev.sh "raid5f"
	fi

	if [[ $(uname -s) == Linux ]]; then
		if [[ $SPDK_TEST_BLOCKDEV -eq 1 || $SPDK_TEST_URING -eq 1 ]]; then
			# The crypto job also includes the SPDK_TEST_BLOCKDEV in its configuration hence the
			# dd tests are executed there as well. However, these tests can take a significant
			# amount of time to complete (up to 4min) on a physical system leading to a potential
			# job timeout. Avoid that by skipping these tests - this should not affect the coverage
			# since dd tests are still run as part of the vg jobs.
			if [[ $SPDK_TEST_CRYPTO -eq 0 ]]; then
				run_test "spdk_dd" $rootdir/test/dd/dd.sh
			fi
		fi
	fi
216
217	if [ $SPDK_TEST_NVME -eq 1 ]; then
218		run_test "blockdev_nvme" $rootdir/test/bdev/blockdev.sh "nvme"
219		if [[ $(uname -s) == Linux ]]; then
220			run_test "blockdev_nvme_gpt" $rootdir/test/bdev/blockdev.sh "gpt"
221		fi
222		run_test "nvme" $rootdir/test/nvme/nvme.sh
223		if [[ $SPDK_TEST_NVME_PMR -eq 1 ]]; then
224			run_test "nvme_pmr" $rootdir/test/nvme/nvme_pmr.sh
225		fi
226
227		run_test "nvme_scc" $rootdir/test/nvme/nvme_scc.sh
228
229		if [[ $SPDK_TEST_NVME_BP -eq 1 ]]; then
230			run_test "nvme_bp" $rootdir/test/nvme/nvme_bp.sh
231		fi
232		if [[ $SPDK_TEST_NVME_CUSE -eq 1 ]]; then
233			run_test "nvme_cuse" $rootdir/test/nvme/cuse/nvme_cuse.sh
234		fi
235		if [[ $SPDK_TEST_NVME_CMB -eq 1 ]]; then
236			run_test "nvme_cmb" $rootdir/test/nvme/cmb/cmb.sh
237		fi
238		if [[ $SPDK_TEST_NVME_FDP -eq 1 ]]; then
239			run_test "nvme_fdp" test/nvme/nvme_fdp.sh
240		fi
241
242		if [[ $SPDK_TEST_NVME_ZNS -eq 1 ]]; then
243			run_test "nvme_zns" $rootdir/test/nvme/zns/zns.sh
244		fi
245
246		run_test "nvme_rpc" $rootdir/test/nvme/nvme_rpc.sh
247		run_test "nvme_rpc_timeouts" $rootdir/test/nvme/nvme_rpc_timeouts.sh
248
249		if [ $(uname -s) = Linux ]; then
250			run_test "sw_hotplug" $rootdir/test/nvme/sw_hotplug.sh
251		fi
252
253		if [[ $SPDK_TEST_XNVME -eq 1 ]]; then
254			run_test "nvme_xnvme" $rootdir/test/nvme/xnvme/xnvme.sh
255			run_test "blockdev_xnvme" $rootdir/test/bdev/blockdev.sh "xnvme"
256			# Run ublk with xnvme since they have similar kernel dependencies
257			run_test "ublk" $rootdir/test/ublk/ublk.sh
258			run_test "ublk_recovery" $rootdir/test/ublk/ublk_recovery.sh
259		fi
260	fi
261
	if [ $SPDK_TEST_IOAT -eq 1 ]; then
		run_test "ioat" $rootdir/test/ioat/ioat.sh
	fi

	timing_exit lib

	if [ $SPDK_TEST_ISCSI -eq 1 ]; then
		run_test "iscsi_tgt" $rootdir/test/iscsi_tgt/iscsi_tgt.sh
		run_test "spdkcli_iscsi" $rootdir/test/spdkcli/iscsi.sh
	fi

	if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
		run_test "rocksdb" $rootdir/test/blobfs/rocksdb/rocksdb.sh
		run_test "blobstore" $rootdir/test/blobstore/blobstore.sh
		run_test "blobstore_grow" $rootdir/test/blobstore/blobstore_grow/blobstore_grow.sh
		run_test "blobfs" $rootdir/test/blobfs/blobfs.sh
		# NOTE(review): the json path is relative to cwd — works because of the
		# earlier 'cd $src'; confirm before moving this block.
		run_test "hello_blob" $SPDK_EXAMPLE_DIR/hello_blob \
			examples/blob/hello_world/hello_blob.json
	fi

	if [ $SPDK_TEST_NVMF -eq 1 ]; then
		export NET_TYPE
		# The NVMe-oF run test cases are split out like this so that the parser that compiles the
		# list of all tests can properly differentiate them. Please do not merge them into one line.
		if [ "$SPDK_TEST_NVMF_TRANSPORT" = "rdma" ]; then
			run_test "nvmf_rdma" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_rdma" $rootdir/test/spdkcli/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
		elif [ "$SPDK_TEST_NVMF_TRANSPORT" = "tcp" ]; then
			run_test "nvmf_tcp" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			if [[ $SPDK_TEST_URING -eq 0 ]]; then
				run_test "spdkcli_nvmf_tcp" $rootdir/test/spdkcli/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
				run_test "nvmf_identify_passthru" $rootdir/test/nvmf/target/identify_passthru.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			fi
			run_test "nvmf_dif" $rootdir/test/nvmf/target/dif.sh
			run_test "nvmf_abort_qd_sizes" $rootdir/test/nvmf/target/abort_qd_sizes.sh
			# The keyring tests utilize NVMe/TLS
			run_test "keyring_file" "$rootdir/test/keyring/file.sh"
			if [[ "$CONFIG_HAVE_KEYUTILS" == y ]]; then
				run_test "keyring_linux" "$rootdir/test/keyring/linux.sh"
			fi
		elif [ "$SPDK_TEST_NVMF_TRANSPORT" = "fc" ]; then
			run_test "nvmf_fc" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_fc" $rootdir/test/spdkcli/nvmf.sh
		else
			echo "unknown NVMe transport, please specify rdma, tcp, or fc."
			exit 1
		fi
	fi
310
	# For vfio_user and vhost tests We need to make sure entire HUGEMEM default
	# goes to a single node as we share hugepages with qemu instance(s) and we
	# cannot split it across all numa nodes without making sure there's enough
	# memory available.

	if [ $SPDK_TEST_VHOST -eq 1 ]; then
		HUGENODE=0 run_test "vhost" $rootdir/test/vhost/vhost.sh --iso
	fi

	if [ $SPDK_TEST_VFIOUSER_QEMU -eq 1 ]; then
		HUGENODE=0 run_test "vfio_user_qemu" $rootdir/test/vfio_user/vfio_user.sh --iso
	fi

	if [ $SPDK_TEST_LVOL -eq 1 ]; then
		run_test "lvol" $rootdir/test/lvol/lvol.sh
		run_test "blob_io_wait" $rootdir/test/blobstore/blob_io_wait/blob_io_wait.sh
	fi

	if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
		timing_enter vhost_initiator
		run_test "vhost_blockdev" $rootdir/test/vhost/initiator/blockdev.sh
		run_test "spdkcli_virtio" $rootdir/test/spdkcli/virtio.sh
		run_test "vhost_shared" $rootdir/test/vhost/shared/shared.sh
		run_test "vhost_fuzz" $rootdir/test/vhost/fuzz/fuzz.sh
		timing_exit vhost_initiator
	fi

	if [ $SPDK_TEST_RBD -eq 1 ]; then
		run_test "blockdev_rbd" $rootdir/test/bdev/blockdev.sh "rbd"
		run_test "spdkcli_rbd" $rootdir/test/spdkcli/rbd.sh
	fi

	if [ $SPDK_TEST_OCF -eq 1 ]; then
		run_test "ocf" $rootdir/test/ocf/ocf.sh
	fi

	if [ $SPDK_TEST_FTL -eq 1 ]; then
		run_test "ftl" $rootdir/test/ftl/ftl.sh
	fi

	if [ $SPDK_TEST_VMD -eq 1 ]; then
		run_test "vmd" $rootdir/test/vmd/vmd.sh
	fi

	if [ $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]; then
		run_test "compress_compdev" $rootdir/test/compress/compress.sh "compdev"
		run_test "compress_isal" $rootdir/test/compress/compress.sh "isal"
	fi

	if [ $SPDK_TEST_OPAL -eq 1 ]; then
		run_test "nvme_opal" $rootdir/test/nvme/nvme_opal.sh
	fi

	# Each crypto backend gets its own bdev pass; chaining exercises them together.
	if [ $SPDK_TEST_CRYPTO -eq 1 ]; then
		run_test "blockdev_crypto_aesni" $rootdir/test/bdev/blockdev.sh "crypto_aesni"
		run_test "blockdev_crypto_sw" $rootdir/test/bdev/blockdev.sh "crypto_sw"
		run_test "blockdev_crypto_qat" $rootdir/test/bdev/blockdev.sh "crypto_qat"
		run_test "chaining" $rootdir/test/bdev/chaining.sh
	fi

	if [[ $SPDK_TEST_SCHEDULER -eq 1 ]]; then
		run_test "scheduler" $rootdir/test/scheduler/scheduler.sh
	fi

	if [[ $SPDK_TEST_SMA -eq 1 ]]; then
		run_test "sma" $rootdir/test/sma/sma.sh
	fi

	if [[ $SPDK_TEST_FUZZER -eq 1 ]]; then
		run_test "llvm_fuzz" $rootdir/test/fuzz/llvm.sh
	fi

	if [[ $SPDK_TEST_ACCEL_MLX5 -eq 1 ]]; then
		run_test "accel_mlx5_crypto" $rootdir/test/accel/mlx5/accel_mlx5_crypto.sh
		run_test "accel_mlx5_copy" $rootdir/test/accel/mlx5/accel_mlx5_copy.sh
	fi
fi
388
# Disarm the failure trap: from here on we are in the normal exit path.
trap - SIGINT SIGTERM EXIT

timing_enter post_cleanup
autotest_cleanup
timing_exit post_cleanup

timing_exit autotest
chmod a+r "$output_dir/timing.txt"

[[ -f "$output_dir/udev.log" ]] && rm -f "$output_dir/udev.log"

if [[ $CONFIG_COVERAGE == y ]]; then
	# generate coverage data and combine with baseline
	# ($LCOV stays unquoted on purpose — it carries embedded options.)
	$LCOV -q -c -d "$src" -t "$(hostname)" -o "$out/cov_test.info"
	$LCOV -q -a "$out/cov_base.info" -a "$out/cov_test.info" -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/dpdk/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '/usr/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/examples/vmd/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/app/spdk_lspci/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/app/spdk_top/*' -o "$out/cov_total.info"
	# The intermediate cov_*.info files live in $out (that is where they were
	# created above); the old bare names only looked in the cwd and silently
	# removed nothing. OLD_STDOUT/OLD_STDERR are cwd-relative by design.
	rm -f "$out/cov_base.info" "$out/cov_test.info" OLD_STDOUT OLD_STDERR
fi
411