xref: /spdk/autotest.sh (revision 33f97fa33ad89651d75bafb5fb87dc4cd28dde6a)
#!/usr/bin/env bash

# Absolute, symlink-resolved path to the directory containing this script
# (the SPDK repo root). Quote the substitutions so a path with spaces works.
rootdir=$(readlink -f "$(dirname "$0")")

# In autotest_common.sh all tests are disabled by default.
# If the configuration of tests is not provided, no tests will be carried out.
if [[ ! -f "$1" ]]; then
	echo "ERROR: SPDK test configuration not specified"
	exit 1
fi
11
# Pull in (in order): the user-supplied test configuration, the shared
# autotest helper functions, and the NVMe-oF helpers.
source "$1"
source "$rootdir/test/common/autotest_common.sh"
source "$rootdir/test/nvmf/common.sh"

# The suite rebinds drivers and writes under /proc and /dev — root only.
if ((EUID != 0)); then
	echo "$0 must be run as root"
	exit 1
fi
20
# Linux-only kernel preparation. Use [[ ]] and quote the command
# substitution so an unexpected uname output cannot break the test (SC2046).
if [[ "$(uname -s)" == Linux ]]; then
	# set core_pattern to a known value to avoid ABRT, systemd-coredump, etc.
	echo "core" > /proc/sys/kernel/core_pattern

	# make sure nbd (network block device) driver is loaded if it is available
	# this ensures that when tests need to use nbd, it will be fully initialized
	modprobe nbd || true
fi
29
# On interrupt or any exit, collect core files and clean up, then report failure.
trap "process_core; autotest_cleanup; exit 1" SIGINT SIGTERM EXIT

timing_enter autotest

create_test_list

# $rootdir already holds the resolved script directory (computed at the top of
# the file); reuse it instead of recomputing readlink/dirname a second time.
src=$rootdir
out=$output_dir
cd "$src"

# Log the current device binding status before making changes.
./scripts/setup.sh status

# No-op on Linux; rebuilds the contigmem module on FreeBSD (helper from
# autotest_common.sh — confirm there).
freebsd_update_contigmem_mod
43
# When lcov is installed, prepare code-coverage collection: configure lcov
# options and record a zeroed baseline to combine with post-run data later.
if hash lcov; then
	# setup output dir for unittest.sh
	export UT_COVERAGE=$out/ut_coverage
	export LCOV_OPTS="
		--rc lcov_branch_coverage=1
		--rc lcov_function_coverage=1
		--rc genhtml_branch_coverage=1
		--rc genhtml_function_coverage=1
		--rc genhtml_legend=1
		--rc geninfo_all_blocks=1
		"
	export LCOV="lcov $LCOV_OPTS --no-external"
	# Print lcov version to log
	$LCOV -v
	# zero out coverage data; quote the paths, $out comes from the environment
	$LCOV -q -c -i -t "Baseline" -d "$src" -o "$out/cov_base.info"
fi
61
# Make sure the disks are clean (no leftover partition tables)
timing_enter cleanup
# Remove old domain socket pathname just in case
rm -f /var/tmp/spdk*.sock

# Load the kernel driver
# NOTE(review): "setup.sh reset" presumably rebinds NVMe devices back to the
# kernel nvme driver — confirm against scripts/setup.sh.
./scripts/setup.sh reset

# Let the kernel discover any filesystems or partitions
# (fixed delay; there is no event to wait on for partition rescan here)
sleep 10
72
if [ $(uname -s) = Linux ]; then
	# OCSSD devices drivers don't support IO issues by kernel so
	# detect OCSSD devices and blacklist them (unbind from any driver).
	# If test scripts want to use this device it needs to do this explicitly.
	#
	# If some OCSSD device is bound to other driver than nvme we won't be able to
	# discover if it is OCSSD or not so load the kernel driver first.


	# Enumerate whole-controller nodes (/dev/nvme0, /dev/nvme1, ...) via a
	# NUL-delimited find so the loop body runs in the current shell and the
	# PCI_BLACKLIST / OCSSD_PCI_DEVICES appends are not lost to a subshell.
	while IFS= read -r -d '' dev
	do
		# Send Open Channel 2.0 Geometry opcode "0xe2" - not supported by NVMe device.
		if nvme admin-passthru $dev --namespace-id=1 --data-len=4096  --opcode=0xe2 --read >/dev/null; then
			# Resolve the controller's PCI address (BDF) through sysfs.
			bdf="$(basename $(readlink -e /sys/class/nvme/${dev#/dev/}/device))"
			echo "INFO: blacklisting OCSSD device: $dev ($bdf)"
			# Space-separated accumulation; leading space is harmless to setup.sh
			# consumers — TODO confirm against scripts/setup.sh parsing.
			PCI_BLACKLIST+=" $bdf"
			OCSSD_PCI_DEVICES+=" $bdf"
		fi
	done <   <(find /dev -maxdepth 1 -regex '/dev/nvme[0-9]+' -print0)

	export OCSSD_PCI_DEVICES

	# Now, bind blacklisted devices to pci-stub module. This will prevent
	# automatic grabbing these devices when we add device/vendor ID to
	# proper driver.
	if [[ -n "$PCI_BLACKLIST" ]]; then
		# shellcheck disable=SC2097,SC2098
		# One-shot environment overrides for this single setup.sh invocation:
		# whitelist exactly the blacklisted devices and force them to pci-stub.
		PCI_WHITELIST="$PCI_BLACKLIST" \
		PCI_BLACKLIST="" \
		DRIVER_OVERRIDE="pci-stub" \
			./scripts/setup.sh

		# Export our blacklist so it will take effect during next setup.sh
		export PCI_BLACKLIST
	fi
fi
109
# Delete all leftover lvols and gpt partitions
# Matches both /dev/nvmeXnY on Linux and /dev/nvmeXnsY on BSD
# Filter out nvme with partitions - the "p*" suffix
# Iterate the glob directly instead of parsing ls output (SC2045); the -e
# guard skips the literal pattern when no namespace devices exist.
for dev in /dev/nvme*n*; do
	[[ -e $dev && $dev != *p* ]] || continue
	dd if=/dev/zero of="$dev" bs=1M count=1
done

# Flush the zeroed blocks to the devices before continuing.
sync

timing_exit cleanup
120
# set up huge pages
timing_enter afterboot
./scripts/setup.sh
timing_exit afterboot

# Initialize RDMA devices for the NVMe-oF tests — helper presumably defined
# in test/nvmf/common.sh sourced above; confirm there.
timing_enter nvmf_setup
rdma_device_init
timing_exit nvmf_setup

# QAT hardware setup is shared by the crypto and compress (reduce) tests.
# Pick the igb_uio variant only when SPDK was configured with that driver.
if [[ $SPDK_TEST_CRYPTO -eq 1 || $SPDK_TEST_REDUCE -eq 1 ]]; then
	if grep -q '#define SPDK_CONFIG_IGB_UIO_DRIVER 1' "$rootdir/include/spdk/config.h"; then
		./scripts/qat_setup.sh igb_uio
	else
		./scripts/qat_setup.sh
	fi
fi
137
# Revert existing OPAL to factory settings that may have been left from earlier failed tests.
# This ensures we won't hit any unexpected failures due to NVMe SSDs being locked.
# Disable this for now as we don't have opal test running
# opal_revert_cleanup

#####################
# Unit Tests
#####################

# [[ ]] tolerates an unset flag (treated as 0) instead of erroring like [ ].
if [[ $SPDK_TEST_UNITTEST -eq 1 ]]; then
	run_test "unittest" ./test/unit/unittest.sh
fi
150
# Functional (integration) tests. Each SPDK_TEST_* flag comes from the
# configuration file sourced at the top of this script. All numeric/string
# tests use [[ ]] for robustness against unset flags, and indentation is
# normalized to tabs throughout (a few lines previously used spaces).
if [[ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]]; then
	timing_enter lib

	run_test "env" test/env/env.sh
	run_test "rpc" test/rpc/rpc.sh
	run_test "rpc_client" test/rpc_client/rpc_client.sh
	run_test "json_config" ./test/json_config/json_config.sh
	run_test "alias_rpc" test/json_config/alias_rpc/alias_rpc.sh
	run_test "spdkcli_tcp" test/spdkcli/tcp.sh
	run_test "dpdk_mem_utility" test/dpdk_memory_utility/test_dpdk_mem_info.sh
	run_test "event" test/event/event.sh

	if [[ $SPDK_TEST_BLOCKDEV -eq 1 ]]; then
		run_test "blockdev_general" test/bdev/blockdev.sh
		run_test "bdev_raid" test/bdev/bdev_raid.sh
	fi

	if [[ $SPDK_TEST_JSON -eq 1 ]]; then
		run_test "test_converter" test/config_converter/test_converter.sh
	fi

	if [[ $SPDK_TEST_NVME -eq 1 ]]; then
		run_test "blockdev_nvme" test/bdev/blockdev.sh "nvme"
		run_test "blockdev_nvme_gpt" test/bdev/blockdev.sh "gpt"
		run_test "nvme" test/nvme/nvme.sh
		if [[ $SPDK_TEST_NVME_CLI -eq 1 ]]; then
			run_test "nvme_cli" test/nvme/spdk_nvme_cli.sh
		fi
		if [[ $SPDK_TEST_NVME_CUSE -eq 1 ]]; then
			run_test "nvme_cuse" test/nvme/cuse/nvme_cuse.sh
			run_test "nvme_cli_cuse" test/nvme/spdk_nvme_cli_cuse.sh
			run_test "nvme_smartctl_cuse" test/nvme/spdk_smartctl_cuse.sh
		fi
		run_test "nvme_rpc" test/nvme/nvme_rpc.sh
		# Only test hotplug without ASAN enabled. Since if it is
		# enabled, it catches SEGV earlier than our handler which
		# breaks the hotplug logic.
		if [[ $SPDK_RUN_ASAN -eq 0 ]]; then
			run_test "nvme_hotplug" test/nvme/hotplug.sh intel
		fi
	fi

	if [[ $SPDK_TEST_IOAT -eq 1 ]]; then
		run_test "ioat" test/ioat/ioat.sh
	fi

	timing_exit lib

	if [[ $SPDK_TEST_ISCSI -eq 1 ]]; then
		run_test "iscsi_tgt_posix" ./test/iscsi_tgt/iscsi_tgt.sh posix
		run_test "spdkcli_iscsi" ./test/spdkcli/iscsi.sh

		# Run raid spdkcli test under iSCSI since blockdev tests run on systems that can't run spdkcli yet
		run_test "spdkcli_raid" test/spdkcli/raid.sh
	fi

	if [[ $SPDK_TEST_VPP -eq 1 ]]; then
		run_test "iscsi_tgt_vpp" ./test/iscsi_tgt/iscsi_tgt.sh vpp
	fi

	if [[ $SPDK_TEST_BLOBFS -eq 1 ]]; then
		run_test "rocksdb" ./test/blobfs/rocksdb/rocksdb.sh
		run_test "blobstore" ./test/blobstore/blobstore.sh
		run_test "blobfs" ./test/blobfs/blobfs.sh
		run_test "hello_blob" ./examples/blob/hello_world/hello_blob \
			examples/blob/hello_world/hello_blob.conf
	fi

	if [[ $SPDK_TEST_NVMF -eq 1 ]]; then
		# The NVMe-oF run test cases are split out like this so that the parser that compiles the
		# list of all tests can properly differentiate them. Please do not merge them into one line.
		if [[ "$SPDK_TEST_NVMF_TRANSPORT" == "rdma" ]]; then
			run_test "nvmf_rdma" ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_rdma" ./test/spdkcli/nvmf.sh
		elif [[ "$SPDK_TEST_NVMF_TRANSPORT" == "tcp" ]]; then
			run_test "nvmf_tcp" ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_tcp" ./test/spdkcli/nvmf.sh
			run_test "nvmf_identify_passthru" test/nvmf/target/identify_passthru.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
		elif [[ "$SPDK_TEST_NVMF_TRANSPORT" == "fc" ]]; then
			run_test "nvmf_fc" ./test/nvmf/nvmf.sh --transport=$SPDK_TEST_NVMF_TRANSPORT
			run_test "spdkcli_nvmf_fc" ./test/spdkcli/nvmf.sh
		else
			echo "unknown NVMe transport, please specify rdma, tcp, or fc."
			exit 1
		fi
	fi

	if [[ $SPDK_TEST_VHOST -eq 1 ]]; then
		run_test "vhost" ./test/vhost/vhost.sh
	fi

	if [[ $SPDK_TEST_LVOL -eq 1 ]]; then
		run_test "lvol2" ./test/lvol/lvol2.sh
		run_test "blob_io_wait" ./test/blobstore/blob_io_wait/blob_io_wait.sh
	fi

	if [[ $SPDK_TEST_VHOST_INIT -eq 1 ]]; then
		timing_enter vhost_initiator
		run_test "vhost_blockdev" ./test/vhost/initiator/blockdev.sh
		run_test "spdkcli_virtio" ./test/spdkcli/virtio.sh
		run_test "vhost_shared" ./test/vhost/shared/shared.sh
		run_test "vhost_fuzz" ./test/vhost/fuzz/fuzz.sh
		timing_exit vhost_initiator
	fi

	if [[ $SPDK_TEST_PMDK -eq 1 ]]; then
		run_test "blockdev_pmem" ./test/bdev/blockdev.sh "pmem"
		run_test "pmem" ./test/pmem/pmem.sh -x
		run_test "spdkcli_pmem" ./test/spdkcli/pmem.sh
	fi

	if [[ $SPDK_TEST_RBD -eq 1 ]]; then
		run_test "blockdev_rbd" ./test/bdev/blockdev.sh "rbd"
		run_test "spdkcli_rbd" ./test/spdkcli/rbd.sh
	fi

	if [[ $SPDK_TEST_OCF -eq 1 ]]; then
		run_test "ocf" ./test/ocf/ocf.sh
	fi

	if [[ $SPDK_TEST_FTL -eq 1 ]]; then
		run_test "ftl" ./test/ftl/ftl.sh
	fi

	if [[ $SPDK_TEST_VMD -eq 1 ]]; then
		run_test "vmd" ./test/vmd/vmd.sh
	fi

	if [[ $SPDK_TEST_REDUCE -eq 1 ]]; then
		run_test "compress_qat" ./test/compress/compress.sh "qat"
		run_test "compress_isal" ./test/compress/compress.sh "isal"
	fi

	if [[ $SPDK_TEST_OPAL -eq 1 ]]; then
		run_test "nvme_opal" ./test/nvme/nvme_opal.sh
	fi

	if [[ $SPDK_TEST_CRYPTO -eq 1 ]]; then
		run_test "blockdev_crypto_aesni" ./test/bdev/blockdev.sh "crypto_aesni"
		# Proceed with the test only if QAT devices are in place
		if [[ -n $(lspci -d:37c8) ]]; then
			run_test "blockdev_crypto_qat" ./test/bdev/blockdev.sh "crypto_qat"
		fi
	fi
fi
296
timing_enter cleanup
autotest_cleanup
timing_exit cleanup

timing_exit autotest
# Make the timing report readable by non-root result collectors; quote the
# externally-provided output path.
chmod a+r "$output_dir/timing.txt"

# Cleanup already ran above — drop the error trap so a normal exit is not
# reported as a failure.
trap - SIGINT SIGTERM EXIT

# catch any stray core files
process_core
308
# Post-run coverage: capture test data, merge with the baseline recorded
# earlier, then strip DPDK and system sources from the report.
if hash lcov; then
	# generate coverage data and combine with baseline; quote the paths,
	# $out comes from the environment
	$LCOV -q -c -d "$src" -t "$(hostname)" -o "$out/cov_test.info"
	$LCOV -q -a "$out/cov_base.info" -a "$out/cov_test.info" -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '*/dpdk/*' -o "$out/cov_total.info"
	$LCOV -q -r "$out/cov_total.info" '/usr/*' -o "$out/cov_total.info"
	git clean -f "*.gcda"
	rm -f cov_base.info cov_test.info OLD_STDOUT OLD_STDERR
fi
318