#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2015 Intel Corporation
#  All rights reserved.
#

rootdir=$(readlink -f $(dirname $0))

# In autotest_common.sh all tests are disabled by default.
# If no test configuration is provided, no tests will be carried out.
if [[ ! -f $1 ]]; then
	echo "ERROR: SPDK test configuration not specified"
	exit 1
fi
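
# Example (illustrative only; the path and the exact set of flags below are hypothetical):
# the argument is a shell snippet enabling the desired test groups, which gets sourced below, e.g.:
#
#   cat > /tmp/spdk-test-config.sh <<'EOF'
#   SPDK_RUN_FUNCTIONAL_TEST=1
#   SPDK_TEST_UNITTEST=1
#   SPDK_TEST_NVME=1
#   EOF
#   sudo ./autotest.sh /tmp/spdk-test-config.sh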

# Autotest.sh, as part of autorun.sh, runs in a different
# shell process than autobuild.sh. Use a helper file to pass
# the env variable containing the library paths.
if [[ -e /tmp/spdk-ld-path ]]; then
	source /tmp/spdk-ld-path
fi
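
# For illustration only (the actual contents are generated by autobuild.sh and the
# path below is made up): the helper file is expected to be a small sourceable
# snippet along the lines of:
#
#   export LD_LIBRARY_PATH="/home/user/spdk/build/lib:$LD_LIBRARY_PATH"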

source "$1"
source "$rootdir/test/common/autotest_common.sh"
source "$rootdir/test/nvmf/common.sh"

if [ $EUID -ne 0 ]; then
	echo "$0 must be run as root"
	exit 1
fi

if [ $(uname -s) = Linux ]; then
	old_core_pattern=$(< /proc/sys/kernel/core_pattern)
	mkdir -p "$output_dir/coredumps"
	# Set core_pattern to a known value to avoid ABRT, systemd-coredump, etc.
	# Dump the $output_dir path to a file so the collector can pick it up while executing.
	# We don't set it in the core_pattern command line because of the string length
	# limitation of 128 bytes. See 'man 5 core' for details.
	echo "|$rootdir/scripts/core-collector.sh %P %s %t" > /proc/sys/kernel/core_pattern
	echo "$output_dir/coredumps" > "$rootdir/.coredump_path"
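
	# Illustration (not executed; the numbers are made up): with the pipe syntax the
	# kernel runs the handler itself and substitutes the specifiers, so a crash would
	# effectively invoke something like
	#
	#   $rootdir/scripts/core-collector.sh 12345 11 1700000000
	#
	# i.e. %P is the PID of the dumping process, %s the signal number and %t the dump
	# time in seconds since the Epoch, with the core image fed to the handler on stdin.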

	# make sure the nbd (network block device) driver is loaded, if it is available;
	# this ensures that when tests need to use nbd, it will be fully initialized
	modprobe nbd || true
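
	# Side note (illustrative only, not something this script does): the number of
	# /dev/nbdX nodes created by the driver can be requested via its module
	# parameter, e.g.:
	#
	#   modprobe nbd nbds_max=16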

	if udevadm=$(type -P udevadm); then
		"$udevadm" monitor --property &> "$output_dir/udev.log" &
		udevadm_pid=$!
	fi
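
	# What ends up in udev.log (roughly; the exact fields depend on the system): one
	# KERNEL/UDEV header per event followed by its properties, e.g.:
	#
	#   KERNEL[1234.567890] add      /devices/.../nvme/nvme0/nvme0n1 (block)
	#   ACTION=add
	#   DEVNAME=/dev/nvme0n1
	#   SUBSYSTEM=block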

fi

start_monitor_resources

trap "autotest_cleanup || :; exit 1" SIGINT SIGTERM EXIT

timing_enter autotest

create_test_list

src=$(readlink -f $(dirname $0))
out=$output_dir
cd $src

freebsd_update_contigmem_mod
freebsd_set_maxsock_buf

if [[ $CONFIG_COVERAGE == y ]]; then
	# Print lcov version to log
	$LCOV --version
	# zero out coverage data
	$LCOV -q -c --no-external -i -t "Baseline" -d $src -o $out/cov_base.info
fi
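
# The "-i" capture above records a zero-count baseline. At the end of the run (see the
# CONFIG_COVERAGE block at the bottom) it is combined with the post-test capture via
# "$LCOV -a cov_base.info -a cov_test.info", so files that were never executed still
# appear in the report with 0% coverage instead of being missing from it.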

# Make sure the disks are clean (no leftover partition tables)
timing_enter pre_cleanup
# Remove old domain socket pathname just in case
rm -f /var/tmp/spdk*.sock

# Load the kernel driver
$rootdir/scripts/setup.sh reset

get_zoned_devs

if ((${#zoned_devs[@]} > 0)); then
	# FIXME: For now make sure zoned devices are tested on-demand by
	# designated tests instead of falling into any other. The main
	# concern here is fio workloads where a specific configuration
	# must be in place for them to work with zoned devices.
	export PCI_BLOCKED="${zoned_devs[*]}"
	export PCI_ZONED="${zoned_devs[*]}"
fi
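
# For illustration only (hypothetical values): with a single zoned controller present,
# zoned_devs could look like ( [nvme0n1]="0000:5e:00.0" ), which would result in
#
#   PCI_BLOCKED="0000:5e:00.0"
#   PCI_ZONED="0000:5e:00.0"
#
# keeping that device away from the generic tests while still advertising it to the
# zoned-specific ones.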

# Delete all leftover lvols and gpt partitions
# Matches both /dev/nvmeXnY on Linux and /dev/nvmeXnsY on BSD
# Filter out nvme devices with partitions - the "p*" suffix
for dev in /dev/nvme*n!(*p*); do
	# Skip zoned devices as non-sequential IO will always fail
	[[ -z ${zoned_devs["${dev##*/}"]} ]] || continue
	if ! block_in_use "$dev"; then
		dd if=/dev/zero of="$dev" bs=1M count=1
	fi
done
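
# The glob above relies on bash's extglob !() negation (assumed to be enabled by the
# sourced common helpers). Illustration of what it matches:
#
#   /dev/nvme0n1   -> matched, first MiB zeroed if the device is not in use
#   /dev/nvme0n1p1 -> skipped, the "p*" suffix marks a partition, not a namespace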

sync

if ! xtrace_disable_per_cmd reap_spdk_processes; then
	echo "WARNING: Lingering SPDK processes were detected. Testing environment may be unstable" >&2
fi

if [[ $(uname -s) == Linux && $SPDK_TEST_SETUP -eq 1 ]]; then
	run_test "setup.sh" "$rootdir/test/setup/test-setup.sh"
fi

$rootdir/scripts/setup.sh status

if [[ $(uname -s) == Linux ]]; then
	# Revert NVMe namespaces to default state
	nvme_namespace_revert
fi

timing_exit pre_cleanup

# set up huge pages
timing_enter afterboot
$rootdir/scripts/setup.sh
timing_exit afterboot
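
# Note (illustrative, not executed): the hugepage allocation done by setup.sh can be
# tuned through environment variables it understands, e.g. something like
#
#   HUGEMEM=4096 $rootdir/scripts/setup.sh
#
# to request ~4 GiB of hugepages; HUGENODE is used the same way further below for the
# vhost and vfio_user runs to pin the allocation to a single NUMA node.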

# Revert existing OPAL to factory settings that may have been left from earlier failed tests.
# This ensures we won't hit any unexpected failures due to NVMe SSDs being locked.
opal_revert_cleanup

#####################
# Unit Tests
#####################

if [ $SPDK_TEST_UNITTEST -eq 1 ]; then
	run_test "unittest" $rootdir/test/unit/unittest.sh
fi

if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
	if [[ $SPDK_TEST_CRYPTO -eq 1 || $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]]; then
		if [[ $SPDK_TEST_USE_IGB_UIO -eq 1 ]]; then
			$rootdir/scripts/qat_setup.sh igb_uio
		else
			$rootdir/scripts/qat_setup.sh
		fi
	fi
	timing_enter lib

	if [[ $SPDK_TEST_URING -eq 1 ]]; then
		export SPDK_SOCK_IMPL_DEFAULT="uring"
	fi

	run_test "env" $rootdir/test/env/env.sh
	run_test "rpc" $rootdir/test/rpc/rpc.sh
	run_test "skip_rpc" $rootdir/test/rpc/skip_rpc.sh
	run_test "rpc_client" $rootdir/test/rpc_client/rpc_client.sh
	run_test "json_config" $rootdir/test/json_config/json_config.sh
	run_test "json_config_extra_key" $rootdir/test/json_config/json_config_extra_key.sh
	run_test "alias_rpc" $rootdir/test/json_config/alias_rpc/alias_rpc.sh

	if [[ $SPDK_JSONRPC_GO_CLIENT -eq 0 ]]; then
		run_test "spdkcli_tcp" $rootdir/test/spdkcli/tcp.sh
	fi

	run_test "dpdk_mem_utility" $rootdir/test/dpdk_memory_utility/test_dpdk_mem_info.sh
	run_test "event" $rootdir/test/event/event.sh
	run_test "thread" $rootdir/test/thread/thread.sh

	if [[ $SPDK_TEST_ACCEL -eq 1 ]]; then
		run_test "accel" $rootdir/test/accel/accel.sh
		run_test "accel_rpc" $rootdir/test/accel/accel_rpc.sh
	fi

	run_test "app_cmdline" $rootdir/test/app/cmdline.sh
	run_test "version" $rootdir/test/app/version.sh

	if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
		run_test "blockdev_general" $rootdir/test/bdev/blockdev.sh
		run_test "bdevperf_config" $rootdir/test/bdev/bdevperf/test_config.sh
		if [[ $(uname -s) == Linux ]]; then
			run_test "reactor_set_interrupt" $rootdir/test/interrupt/reactor_set_interrupt.sh
			run_test "reap_unregistered_poller" $rootdir/test/interrupt/reap_unregistered_poller.sh
		fi
	fi

	if [[ $SPDK_TEST_RAID -eq 1 ]]; then
		run_test "bdev_raid" $rootdir/test/bdev/bdev_raid.sh
		run_test "spdkcli_raid" $rootdir/test/spdkcli/raid.sh
		run_test "blockdev_raid5f" $rootdir/test/bdev/blockdev.sh "raid5f"
	fi

	if [[ $(uname -s) == Linux ]]; then
		if [[ $SPDK_TEST_BLOCKDEV -eq 1 || $SPDK_TEST_URING -eq 1 ]]; then
			# The crypto job also includes SPDK_TEST_BLOCKDEV in its configuration, hence the
			# dd tests are executed there as well. However, these tests can take a significant
			# amount of time to complete (up to 4min) on a physical system, leading to a potential
			# job timeout. Avoid that by skipping these tests - this should not affect the coverage
			# since dd tests are still run as part of the vg jobs.
			if [[ $SPDK_TEST_CRYPTO -eq 0 ]]; then
				run_test "spdk_dd" $rootdir/test/dd/dd.sh
			fi
		fi
	fi

	if [ $SPDK_TEST_NVME -eq 1 ]; then
		run_test "blockdev_nvme" $rootdir/test/bdev/blockdev.sh "nvme"
		if [[ $(uname -s) == Linux ]]; then
			run_test "blockdev_nvme_gpt" $rootdir/test/bdev/blockdev.sh "gpt"
		fi
		run_test "nvme" $rootdir/test/nvme/nvme.sh
		if [[ $SPDK_TEST_NVME_PMR -eq 1 ]]; then
			run_test "nvme_pmr" $rootdir/test/nvme/nvme_pmr.sh
		fi

		run_test "nvme_scc" $rootdir/test/nvme/nvme_scc.sh

		if [[ $SPDK_TEST_NVME_BP -eq 1 ]]; then
			run_test "nvme_bp" $rootdir/test/nvme/nvme_bp.sh
		fi
		if [[ $SPDK_TEST_NVME_CUSE -eq 1 ]]; then
			run_test "nvme_cuse" $rootdir/test/nvme/cuse/nvme_cuse.sh
		fi
		if [[ $SPDK_TEST_NVME_CMB -eq 1 ]]; then
			run_test "nvme_cmb" $rootdir/test/nvme/cmb/cmb.sh
		fi
		if [[ $SPDK_TEST_NVME_FDP -eq 1 ]]; then
			run_test "nvme_fdp" $rootdir/test/nvme/nvme_fdp.sh
		fi

		if [[ $SPDK_TEST_NVME_ZNS -eq 1 ]]; then
			run_test "nvme_zns" $rootdir/test/nvme/zns/zns.sh
		fi

		run_test "nvme_rpc" $rootdir/test/nvme/nvme_rpc.sh
		run_test "nvme_rpc_timeouts" $rootdir/test/nvme/nvme_rpc_timeouts.sh

		if [ $(uname -s) = Linux ]; then
			run_test "sw_hotplug" $rootdir/test/nvme/sw_hotplug.sh
		fi

		if [[ $SPDK_TEST_XNVME -eq 1 ]]; then
			run_test "nvme_xnvme" $rootdir/test/nvme/xnvme/xnvme.sh
			run_test "blockdev_xnvme" $rootdir/test/bdev/blockdev.sh "xnvme"
			# Run ublk with xnvme since they have similar kernel dependencies
			run_test "ublk" $rootdir/test/ublk/ublk.sh
			run_test "ublk_recovery" $rootdir/test/ublk/ublk_recovery.sh
		fi

		if [[ $SPDK_TEST_NVME_INTERRUPT -eq 1 ]]; then
			run_test "nvme_interrupt" "$rootdir/test/nvme/interrupt.sh"
		fi
	fi

	if [ $SPDK_TEST_IOAT -eq 1 ]; then
		run_test "ioat" $rootdir/test/ioat/ioat.sh
	fi

	timing_exit lib

	if [ $SPDK_TEST_ISCSI -eq 1 ]; then
		run_test "iscsi_tgt" $rootdir/test/iscsi_tgt/iscsi_tgt.sh
		run_test "spdkcli_iscsi" $rootdir/test/spdkcli/iscsi.sh
	fi

	if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
		run_test "rocksdb" $rootdir/test/blobfs/rocksdb/rocksdb.sh
		run_test "blobstore" $rootdir/test/blobstore/blobstore.sh
		run_test "blobstore_grow" $rootdir/test/blobstore/blobstore_grow/blobstore_grow.sh
		run_test "blobfs" $rootdir/test/blobfs/blobfs.sh
		run_test "hello_blob" $SPDK_EXAMPLE_DIR/hello_blob \
			examples/blob/hello_world/hello_blob.json
	fi

	if [ $SPDK_TEST_NVMF -eq 1 ]; then
		export NET_TYPE
		# The NVMe-oF run test cases are split out like this so that the parser that compiles the
		# list of all tests can properly differentiate them. Please do not merge them into one line.
		if [ "$SPDK_TEST_NVMF_TRANSPORT" = "rdma" ]; then
			run_test "nvmf_rdma" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_rdma" $rootdir/test/spdkcli/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
		elif [ "$SPDK_TEST_NVMF_TRANSPORT" = "tcp" ]; then
			run_test "nvmf_tcp" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			if [[ $SPDK_TEST_URING -eq 0 ]]; then
				run_test "spdkcli_nvmf_tcp" $rootdir/test/spdkcli/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
				run_test "nvmf_identify_passthru" $rootdir/test/nvmf/target/identify_passthru.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			fi
			run_test "nvmf_dif" $rootdir/test/nvmf/target/dif.sh
			run_test "nvmf_abort_qd_sizes" $rootdir/test/nvmf/target/abort_qd_sizes.sh
			# The keyring tests utilize NVMe/TLS
			run_test "keyring_file" "$rootdir/test/keyring/file.sh"
			if [[ "$CONFIG_HAVE_KEYUTILS" == y ]]; then
				run_test "keyring_linux" "$rootdir/scripts/keyctl-session-wrapper" \
					"$rootdir/test/keyring/linux.sh"
			fi
		elif [ "$SPDK_TEST_NVMF_TRANSPORT" = "fc" ]; then
			run_test "nvmf_fc" $rootdir/test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_fc" $rootdir/test/spdkcli/nvmf.sh
		else
			echo "unknown NVMe transport, please specify rdma, tcp, or fc."
			exit 1
		fi
	fi

	# For vfio_user and vhost tests we need to make sure the entire HUGEMEM default
	# goes to a single node as we share hugepages with the qemu instance(s) and we
	# cannot split it across all numa nodes without making sure there's enough
	# memory available.

	if [ $SPDK_TEST_VHOST -eq 1 ]; then
		HUGENODE=0 run_test "vhost" $rootdir/test/vhost/vhost.sh --iso
	fi

	if [ $SPDK_TEST_VFIOUSER_QEMU -eq 1 ]; then
		HUGENODE=0 run_test "vfio_user_qemu" $rootdir/test/vfio_user/vfio_user.sh --iso
	fi

	if [ $SPDK_TEST_LVOL -eq 1 ]; then
		run_test "lvol" $rootdir/test/lvol/lvol.sh
		run_test "blob_io_wait" $rootdir/test/blobstore/blob_io_wait/blob_io_wait.sh
	fi

	if [ $SPDK_TEST_VHOST_INIT -eq 1 ]; then
		timing_enter vhost_initiator
		run_test "vhost_blockdev" $rootdir/test/vhost/initiator/blockdev.sh
		run_test "spdkcli_virtio" $rootdir/test/spdkcli/virtio.sh
		run_test "vhost_shared" $rootdir/test/vhost/shared/shared.sh
		run_test "vhost_fuzz" $rootdir/test/vhost/fuzz/fuzz.sh
		timing_exit vhost_initiator
	fi

	if [ $SPDK_TEST_RBD -eq 1 ]; then
		run_test "blockdev_rbd" $rootdir/test/bdev/blockdev.sh "rbd"
		run_test "spdkcli_rbd" $rootdir/test/spdkcli/rbd.sh
	fi

	if [ $SPDK_TEST_OCF -eq 1 ]; then
		run_test "ocf" $rootdir/test/ocf/ocf.sh
	fi

	if [ $SPDK_TEST_FTL -eq 1 ]; then
		run_test "ftl" $rootdir/test/ftl/ftl.sh
	fi

	if [ $SPDK_TEST_VMD -eq 1 ]; then
		run_test "vmd" $rootdir/test/vmd/vmd.sh
	fi

	if [ $SPDK_TEST_VBDEV_COMPRESS -eq 1 ]; then
		run_test "compress_compdev" $rootdir/test/compress/compress.sh "compdev"
		run_test "compress_isal" $rootdir/test/compress/compress.sh "isal"
	fi

	if [ $SPDK_TEST_OPAL -eq 1 ]; then
		run_test "nvme_opal" $rootdir/test/nvme/nvme_opal.sh
	fi

	if [ $SPDK_TEST_CRYPTO -eq 1 ]; then
		run_test "blockdev_crypto_aesni" $rootdir/test/bdev/blockdev.sh "crypto_aesni"
		run_test "blockdev_crypto_sw" $rootdir/test/bdev/blockdev.sh "crypto_sw"
		run_test "blockdev_crypto_qat" $rootdir/test/bdev/blockdev.sh "crypto_qat"
		run_test "chaining" $rootdir/test/bdev/chaining.sh
	fi

	if [[ $SPDK_TEST_SCHEDULER -eq 1 ]]; then
		run_test "scheduler" $rootdir/test/scheduler/scheduler.sh
	fi

	if [[ $SPDK_TEST_SMA -eq 1 ]]; then
		run_test "sma" $rootdir/test/sma/sma.sh
	fi

	if [[ $SPDK_TEST_FUZZER -eq 1 ]]; then
		run_test "llvm_fuzz" $rootdir/test/fuzz/llvm.sh
	fi

	if [[ $SPDK_TEST_ACCEL_MLX5 -eq 1 ]]; then
		run_test "accel_mlx5_crypto" $rootdir/test/accel/mlx5/accel_mlx5_crypto.sh
		run_test "accel_mlx5_copy" $rootdir/test/accel/mlx5/accel_mlx5_copy.sh
		run_test "accel_mlx5_crc32c" $rootdir/test/accel/mlx5/accel_mlx5_crc32c.sh
	fi
fi

trap - SIGINT SIGTERM EXIT

timing_enter post_cleanup
autotest_cleanup
timing_exit post_cleanup

timing_exit autotest
chmod a+r $output_dir/timing.txt

[[ -f "$output_dir/udev.log" ]] && rm -f "$output_dir/udev.log"

if [[ $CONFIG_COVERAGE == y ]]; then
	# generate coverage data and combine with baseline
	$LCOV -q -c --no-external -d $src -t "$(hostname)" -o $out/cov_test.info
	$LCOV -q -a $out/cov_base.info -a $out/cov_test.info -o $out/cov_total.info
	$LCOV -q -r $out/cov_total.info '*/dpdk/*' -o $out/cov_total.info
	# C++ headers in /usr can sometimes generate data even when specifying
	# --no-external, so remove them. But we need to add an ignore-errors
	# flag to squash warnings on systems where they don't generate data.
	$LCOV -q -r $out/cov_total.info --ignore-errors unused,unused '/usr/*' -o $out/cov_total.info
	$LCOV -q -r $out/cov_total.info '*/examples/vmd/*' -o $out/cov_total.info
	$LCOV -q -r $out/cov_total.info '*/app/spdk_lspci/*' -o $out/cov_total.info
	$LCOV -q -r $out/cov_total.info '*/app/spdk_top/*' -o $out/cov_total.info
	rm -f cov_base.info cov_test.info OLD_STDOUT OLD_STDERR
fi
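
# A browsable report could be generated from the combined data with lcov's genhtml
# companion tool (not part of this script; the output path is just an example):
#
#   genhtml $out/cov_total.info -o $out/coverage_report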