xref: /spdk/autotest.sh (revision a93a149c51f6811ff221fdd1dfe40ba0cea96f8c)
1#!/usr/bin/env bash
2#  SPDX-License-Identifier: BSD-3-Clause
3#  Copyright (C) 2015 Intel Corporation
4#  All rights reserved.
5#
6
# Absolute path to the SPDK repository root (the directory of this script).
rootdir=$(readlink -f "$(dirname "$0")")

# In autotest_common.sh all tests are disabled by default.
# If the configuration of tests is not provided, no tests will be carried out.
if [[ ! -f $1 ]]; then
	echo "ERROR: SPDK test configuration not specified" >&2
	exit 1
fi
15
# Autotest.sh, as part of autorun.sh, runs in a different
# shell process than autobuild.sh. Use helper file to pass
# over env variable containing libraries paths.
[[ ! -e /tmp/spdk-ld-path ]] || source /tmp/spdk-ld-path

# Pull in the test configuration given on the command line, then the
# shared helpers used throughout this script.
source "$1"
source "$rootdir/test/common/autotest_common.sh"
source "$rootdir/test/nvmf/common.sh"
26
# Nearly everything below manipulates kernel state (drivers, hugepages,
# /proc) — refuse to continue without root privileges.
if ((EUID != 0)); then
	echo "$0 must be run as root" >&2
	exit 1
fi
31
if [[ $(uname -s) == Linux ]]; then
	# Stash the current core_pattern so it can be restored later —
	# presumably done by autotest_cleanup; confirm.
	old_core_pattern=$(< /proc/sys/kernel/core_pattern)
	mkdir -p "$output_dir/coredumps"
	# Set core_pattern to a known value to avoid ABRT, systemd-coredump, etc.
	# Dump the $output_dir path to a file so collector can pick it up while executing.
	# We don't set it in the core_pattern command line because of the string length limitation
	# of 128 bytes. See 'man core 5' for details.
	echo "|$rootdir/scripts/core-collector.sh %P %s %t" > /proc/sys/kernel/core_pattern
	echo "$output_dir/coredumps" > "$rootdir/.coredump_path"

	# make sure nbd (network block device) driver is loaded if it is available
	# this ensures that when tests need to use nbd, it will be fully initialized
	modprobe nbd || true

	# Record all udev events for the duration of the run to aid debugging
	# of hotplug-related tests.
	if udevadm=$(type -P udevadm); then
		"$udevadm" monitor --property &> "$output_dir/udev.log" &
		udevadm_pid=$!
	fi

fi
52
start_monitor_resources

# Ensure resources are released and leftover processes are reaped on any
# exit path (error, signal or normal flow) until the trap is cleared after
# the functional tests complete.
trap "autotest_cleanup || :; exit 1" SIGINT SIGTERM EXIT

timing_enter autotest

create_test_list

# $src resolves to the same directory as $rootdir; kept as a separate
# variable to pair with $out in the lcov invocations below.
src=$(readlink -f "$(dirname "$0")")
out=$output_dir
cd "$src"

freebsd_update_contigmem_mod
freebsd_set_maxsock_buf
67
# lcov takes considerable time to process clang coverage.
# Disabling lcov for clang builds allows us to avoid that.
# More information: https://github.com/spdk/spdk/issues/1693
CC_TYPE=$(grep CC_TYPE mk/cc.mk)
if hash lcov && ! [[ "$CC_TYPE" == *"clang"* ]]; then
	export LCOV_OPTS="
		--rc lcov_branch_coverage=1
		--rc lcov_function_coverage=1
		--rc genhtml_branch_coverage=1
		--rc genhtml_function_coverage=1
		--rc genhtml_legend=1
		--rc geninfo_all_blocks=1
		"
	# $LCOV is deliberately expanded unquoted at call sites so that
	# LCOV_OPTS word-splits into separate lcov arguments.
	export LCOV="lcov $LCOV_OPTS --no-external"
	# Print lcov version to log
	$LCOV -v
	# zero out coverage data
	$LCOV -q -c -i -t "Baseline" -d "$src" -o "$out/cov_base.info"
fi
87
# Make sure the disks are clean (no leftover partition tables)
timing_enter pre_cleanup
# Remove old domain socket pathname just in case
rm -f /var/tmp/spdk*.sock

# Load the kernel driver
"$rootdir/scripts/setup.sh" reset

get_zoned_devs

if ((${#zoned_devs[@]} > 0)); then
	# FIXME: For now make sure zoned devices are tested on-demand by
	# a designated tests instead of falling into any other. The main
	# concern here are fio workloads where specific configuration
	# must be in place for it to work with the zoned device.
	export PCI_BLOCKED="${zoned_devs[*]}"
	export PCI_ZONED="${zoned_devs[*]}"
fi
106
# Delete all leftover lvols and gpt partitions
# Matches both /dev/nvmeXnY on Linux and /dev/nvmeXnsY on BSD
# Filter out nvme with partitions - the "p*" suffix
# NOTE(review): the !(*p*) pattern needs 'shopt -s extglob' — presumably
# enabled by the sourced autotest_common.sh; confirm.
for dev in /dev/nvme*n!(*p*); do
	# Skip zoned devices as non-sequential IO will always fail
	[[ -z ${zoned_devs["${dev##*/}"]} ]] || continue
	# Zero the first MiB (enough to clear GPT/lvol metadata) of any
	# namespace not currently claimed by another user of the block device.
	if ! block_in_use "$dev"; then
		dd if=/dev/zero of="$dev" bs=1M count=1
	fi
done
117
# Flush the namespace wipes above to the devices before proceeding.
sync

# Warn (but don't abort) if SPDK processes survived the cleanup sweep —
# they may destabilize subsequent tests.
if ! xtrace_disable_per_cmd reap_spdk_processes; then
	echo "WARNING: Lingering SPDK processes were detected. Testing environment may be unstable" >&2
fi

if [[ $(uname -s) == Linux && $SPDK_TEST_SETUP -eq 1 ]]; then
	run_test "setup.sh" "$rootdir/test/setup/test-setup.sh"
fi

"$rootdir/scripts/setup.sh" status

if [[ $(uname -s) == Linux ]]; then
	# Revert NVMe namespaces to default state
	nvme_namespace_revert
fi

timing_exit pre_cleanup
136
# set up huge pages
timing_enter afterboot
"$rootdir/scripts/setup.sh"
timing_exit afterboot

# Revert existing OPAL to factory settings that may have been left from earlier failed tests.
# This ensures we won't hit any unexpected failures due to NVMe SSDs being locked.
opal_revert_cleanup
145
#####################
# Unit Tests
#####################

if [[ $SPDK_TEST_UNITTEST -eq 1 ]]; then
	run_test "unittest" "$rootdir/test/unit/unittest.sh"
fi
153
# Main functional-test dispatch: each SPDK_TEST_* knob (defaulted by
# autotest_common.sh) gates one group of run_test invocations.
if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
	if [[ $SPDK_TEST_CRYPTO -eq 1 || $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]]; then
		if [[ $SPDK_TEST_USE_IGB_UIO -eq 1 ]]; then
			$rootdir/scripts/qat_setup.sh igb_uio
		else
			$rootdir/scripts/qat_setup.sh
		fi
	fi
	timing_enter lib

	if [[ $SPDK_TEST_URING -eq 1 ]]; then
		export SPDK_SOCK_IMPL_DEFAULT="uring"
	fi

	run_test "env" $rootdir/test/env/env.sh
	run_test "rpc" $rootdir/test/rpc/rpc.sh
	run_test "skip_rpc" $rootdir/test/rpc/skip_rpc.sh
	run_test "rpc_client" $rootdir/test/rpc_client/rpc_client.sh
	run_test "json_config" $rootdir/test/json_config/json_config.sh
	run_test "json_config_extra_key" $rootdir/test/json_config/json_config_extra_key.sh
	run_test "alias_rpc" $rootdir/test/json_config/alias_rpc/alias_rpc.sh

	if [[ $SPDK_JSONRPC_GO_CLIENT -eq 0 ]]; then
		run_test "spdkcli_tcp" $rootdir/test/spdkcli/tcp.sh
	fi

	run_test "dpdk_mem_utility" $rootdir/test/dpdk_memory_utility/test_dpdk_mem_info.sh
	run_test "event" $rootdir/test/event/event.sh
	run_test "thread" $rootdir/test/thread/thread.sh

	if [[ $SPDK_TEST_ACCEL -eq 1 ]]; then
		run_test "accel" $rootdir/test/accel/accel.sh
		run_test "accel_rpc" $rootdir/test/accel/accel_rpc.sh
	fi

	run_test "app_cmdline" $rootdir/test/app/cmdline.sh
	run_test "version" $rootdir/test/app/version.sh

	if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
		run_test "blockdev_general" $rootdir/test/bdev/blockdev.sh
		run_test "bdevperf_config" $rootdir/test/bdev/bdevperf/test_config.sh
		if [[ $(uname -s) == Linux ]]; then
			run_test "reactor_set_interrupt" $rootdir/test/interrupt/reactor_set_interrupt.sh
			run_test "reap_unregistered_poller" $rootdir/test/interrupt/reap_unregistered_poller.sh
		fi
	fi

	if [[ $SPDK_TEST_RAID -eq 1 ]]; then
		run_test "bdev_raid" $rootdir/test/bdev/bdev_raid.sh
		run_test "spdkcli_raid" $rootdir/test/spdkcli/raid.sh
		run_test "blockdev_raid5f" $rootdir/test/bdev/blockdev.sh "raid5f"
	fi

	if [[ $(uname -s) == Linux ]]; then
		if [[ $SPDK_TEST_BLOCKDEV -eq 1 || $SPDK_TEST_URING -eq 1 ]]; then
			# The crypto job also includes the SPDK_TEST_BLOCKDEV in its configuration hence the
			# dd tests are executed there as well. However, these tests can take a significant
			# amount of time to complete (up to 4min) on a physical system leading to a potential
			# job timeout. Avoid that by skipping these tests - this should not affect the coverage
			# since dd tests are still run as part of the vg jobs.
			if [[ $SPDK_TEST_CRYPTO -eq 0 ]]; then
				run_test "spdk_dd" $rootdir/test/dd/dd.sh
			fi
		fi
	fi

	if [ $SPDK_TEST_NVME -eq 1 ]; then
		run_test "blockdev_nvme" $rootdir/test/bdev/blockdev.sh "nvme"
		if [[ $(uname -s) == Linux ]]; then
			run_test "blockdev_nvme_gpt" $rootdir/test/bdev/blockdev.sh "gpt"
		fi
		run_test "nvme" $rootdir/test/nvme/nvme.sh
		if [[ $SPDK_TEST_NVME_PMR -eq 1 ]]; then
			run_test "nvme_pmr" $rootdir/test/nvme/nvme_pmr.sh
		fi

		run_test "nvme_scc" $rootdir/test/nvme/nvme_scc.sh

		if [[ $SPDK_TEST_NVME_BP -eq 1 ]]; then
			run_test "nvme_bp" $rootdir/test/nvme/nvme_bp.sh
		fi
		if [[ $SPDK_TEST_NVME_CUSE -eq 1 ]]; then
			run_test "nvme_cuse" $rootdir/test/nvme/cuse/nvme_cuse.sh
		fi
		if [[ $SPDK_TEST_NVME_CMB -eq 1 ]]; then
			run_test "nvme_cmb" $rootdir/test/nvme/cmb/cmb.sh
		fi
		if [[ $SPDK_TEST_NVME_FDP -eq 1 ]]; then
			# Use an absolute path like every other run_test invocation; the
			# previous bare relative path only worked because CWD == $rootdir.
			run_test "nvme_fdp" $rootdir/test/nvme/nvme_fdp.sh
		fi

		if [[ $SPDK_TEST_NVME_ZNS -eq 1 ]]; then
			run_test "nvme_zns" $rootdir/test/nvme/zns/zns.sh
		fi

		run_test "nvme_rpc" $rootdir/test/nvme/nvme_rpc.sh
		run_test "nvme_rpc_timeouts" $rootdir/test/nvme/nvme_rpc_timeouts.sh

		if [ $(uname -s) = Linux ]; then
			run_test "sw_hotplug" $rootdir/test/nvme/sw_hotplug.sh
		fi

		if [[ $SPDK_TEST_XNVME -eq 1 ]]; then
			run_test "nvme_xnvme" $rootdir/test/nvme/xnvme/xnvme.sh
			run_test "blockdev_xnvme" $rootdir/test/bdev/blockdev.sh "xnvme"
			# Run ublk with xnvme since they have similar kernel dependencies
			run_test "ublk" $rootdir/test/ublk/ublk.sh
			run_test "ublk_recovery" $rootdir/test/ublk/ublk_recovery.sh
		fi
	fi

	if [ $SPDK_TEST_IOAT -eq 1 ]; then
		run_test "ioat" $rootdir/test/ioat/ioat.sh
	fi

	timing_exit lib

	if [ $SPDK_TEST_ISCSI -eq 1 ]; then
		run_test "iscsi_tgt" $rootdir/test/iscsi_tgt/iscsi_tgt.sh
		run_test "spdkcli_iscsi" $rootdir/test/spdkcli/iscsi.sh
	fi

	if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
		run_test "rocksdb" $rootdir/test/blobfs/rocksdb/rocksdb.sh
		run_test "blobstore" $rootdir/test/blobstore/blobstore.sh
		run_test "blobstore_grow" $rootdir/test/blobstore/blobstore_grow/blobstore_grow.sh
		run_test "blobfs" $rootdir/test/blobfs/blobfs.sh
		run_test "hello_blob" $SPDK_EXAMPLE_DIR/hello_blob \
			examples/blob/hello_world/hello_blob.json
	fi

	if [ $SPDK_TEST_NVMF -eq 1 ]; then
		export NET_TYPE
		# The NVMe-oF run test cases are split out like this so that the parser that compiles the
		# list of all tests can properly differentiate them. Please do not merge them into one line.
		if [ "$SPDK_TEST_NVMF_TRANSPORT" = "rdma" ]; then
			run_test "nvmf_rdma" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_rdma" $rootdir/test/spdkcli/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
		elif [ "$SPDK_TEST_NVMF_TRANSPORT" = "tcp" ]; then
			run_test "nvmf_tcp" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			if [[ $SPDK_TEST_URING -eq 0 ]]; then
				run_test "spdkcli_nvmf_tcp" $rootdir/test/spdkcli/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
				run_test "nvmf_identify_passthru" $rootdir/test/nvmf/target/identify_passthru.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			fi
			run_test "nvmf_dif" $rootdir/test/nvmf/target/dif.sh
			run_test "nvmf_abort_qd_sizes" $rootdir/test/nvmf/target/abort_qd_sizes.sh
			# The keyring tests utilize NVMe/TLS
			run_test "keyring_file" "$rootdir/test/keyring/file.sh"
			if [[ "$CONFIG_HAVE_KEYUTILS" == y ]]; then
				run_test "keyring_linux" "$rootdir/test/keyring/linux.sh"
			fi
		elif [ "$SPDK_TEST_NVMF_TRANSPORT" = "fc" ]; then
			run_test "nvmf_fc" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_fc" $rootdir/test/spdkcli/nvmf.sh
		else
			echo "unknown NVMe transport, please specify rdma, tcp, or fc." >&2
			exit 1
		fi
	fi

	# For vfio_user and vhost tests We need to make sure entire HUGEMEM default
	# goes to a single node as we share hugepages with qemu instance(s) and we
	# cannot split it across all numa nodes without making sure there's enough
	# memory available.

	if [ $SPDK_TEST_VHOST -eq 1 ]; then
		HUGENODE=0 run_test "vhost" $rootdir/test/vhost/vhost.sh --iso
	fi

	if [ $SPDK_TEST_VFIOUSER_QEMU -eq 1 ]; then
		HUGENODE=0 run_test "vfio_user_qemu" $rootdir/test/vfio_user/vfio_user.sh --iso
	fi

	if [ $SPDK_TEST_LVOL -eq 1 ]; then
		run_test "lvol" $rootdir/test/lvol/lvol.sh
		run_test "blob_io_wait" $rootdir/test/blobstore/blob_io_wait/blob_io_wait.sh
	fi

	if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
		timing_enter vhost_initiator
		run_test "vhost_blockdev" $rootdir/test/vhost/initiator/blockdev.sh
		run_test "spdkcli_virtio" $rootdir/test/spdkcli/virtio.sh
		run_test "vhost_shared" $rootdir/test/vhost/shared/shared.sh
		run_test "vhost_fuzz" $rootdir/test/vhost/fuzz/fuzz.sh
		timing_exit vhost_initiator
	fi

	if [ $SPDK_TEST_RBD -eq 1 ]; then
		run_test "blockdev_rbd" $rootdir/test/bdev/blockdev.sh "rbd"
		run_test "spdkcli_rbd" $rootdir/test/spdkcli/rbd.sh
	fi

	if [ $SPDK_TEST_OCF -eq 1 ]; then
		run_test "ocf" $rootdir/test/ocf/ocf.sh
	fi

	if [ $SPDK_TEST_FTL -eq 1 ]; then
		run_test "ftl" $rootdir/test/ftl/ftl.sh
	fi

	if [ $SPDK_TEST_VMD -eq 1 ]; then
		run_test "vmd" $rootdir/test/vmd/vmd.sh
	fi

	if [ $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]; then
		run_test "compress_compdev" $rootdir/test/compress/compress.sh "compdev"
		run_test "compress_isal" $rootdir/test/compress/compress.sh "isal"
	fi

	if [ $SPDK_TEST_OPAL -eq 1 ]; then
		run_test "nvme_opal" $rootdir/test/nvme/nvme_opal.sh
	fi

	if [ $SPDK_TEST_CRYPTO -eq 1 ]; then
		run_test "blockdev_crypto_aesni" $rootdir/test/bdev/blockdev.sh "crypto_aesni"
		run_test "blockdev_crypto_sw" $rootdir/test/bdev/blockdev.sh "crypto_sw"
		run_test "blockdev_crypto_qat" $rootdir/test/bdev/blockdev.sh "crypto_qat"
		run_test "chaining" $rootdir/test/bdev/chaining.sh
	fi

	if [[ $SPDK_TEST_SCHEDULER -eq 1 ]]; then
		run_test "scheduler" $rootdir/test/scheduler/scheduler.sh
	fi

	if [[ $SPDK_TEST_SMA -eq 1 ]]; then
		run_test "sma" $rootdir/test/sma/sma.sh
	fi

	if [[ $SPDK_TEST_FUZZER -eq 1 ]]; then
		run_test "llvm_fuzz" $rootdir/test/fuzz/llvm.sh
	fi

	if [[ $SPDK_TEST_ACCEL_MLX5 -eq 1 ]]; then
		run_test "accel_mlx5_crypto" $rootdir/test/accel/mlx5/accel_mlx5_crypto.sh
	fi
fi
390
# Tests are done; drop the error trap so cleanup failures below don't recurse.
trap - SIGINT SIGTERM EXIT

timing_enter post_cleanup
autotest_cleanup
timing_exit post_cleanup

timing_exit autotest
chmod a+r "$output_dir/timing.txt"

# NOTE(review): the udevadm monitor started at the top is not explicitly
# killed here — presumably reaped by autotest_cleanup; confirm.
# rm -f is already a no-op for a missing file, so no existence check needed.
rm -f "$output_dir/udev.log"
401
if hash lcov && ! [[ "$CC_TYPE" == *"clang"* ]]; then
	# generate coverage data and combine with baseline
	$LCOV -q -c -d "$src" -t "$(hostname)" -o "$out/cov_test.info"
	$LCOV -q -a "$out/cov_base.info" -a "$out/cov_test.info" -o "$out/cov_total.info"
	# Strip third-party, system and tooling code from the combined report.
	$LCOV -q -r "$out/cov_total.info" '*/dpdk/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '/usr/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/examples/vmd/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/app/spdk_lspci/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/app/spdk_top/*' -o "$out/cov_total.info"
	# NOTE(review): these paths are relative to CWD ($src) while the
	# cov_*.info files above were written under $out — confirm whether
	# "$out/cov_base.info" etc. were intended here.
	rm -f cov_base.info cov_test.info OLD_STDOUT OLD_STDERR
fi
413