#!/usr/bin/env bash

testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

function starttarget() {
	# Start the target
	nvmfappstart -m 0x1E

	# -u 8192 sets the transport's I/O unit size to 8 KiB
	$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192

	timing_enter create_subsystems
	# Create the subsystem, attach the malloc bdev as its namespace, add a
	# listener, and allow host0 to connect
	rm -f $testdir/rpcs.txt
	cat <<- EOL >> $testdir/rpcs.txt
		bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
		nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -s SPDK0
		nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 Malloc0
		nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
		nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0
	EOL
	$rpc_py < $testdir/rpcs.txt
	timing_exit create_subsystems
}
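
# For reference, batching the RPCs through a single rpc.py invocation above is
# equivalent to issuing them one at a time; a minimal sketch of the unbatched
# form (same RPC names and arguments as in rpcs.txt):
#   $rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
#   $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -s SPDK0
#   $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 Malloc0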

function stoptarget() {
	rm -f ./local-job0-0-verify.state
	rm -f $testdir/bdevperf.conf
	rm -f $testdir/rpcs.txt

	nvmftestfini
}

function waitforio() {
	# $1 = RPC socket path
	if [ -z "$1" ]; then
		exit 1
	fi
	# $2 = bdev name
	if [ -z "$2" ]; then
		exit 1
	fi
	local ret=1
	local i
	local read_io_count
	for ((i = 10; i != 0; i--)); do
		read_io_count=$($rpc_py -s "$1" bdev_get_iostat -b "$2" | jq -r '.bdevs[0].num_read_ops')
		# A few I/Os will happen during the initial examine, so wait until at least
		# 100 I/Os have completed to know that bdevperf is really generating them.
		if [ "$read_io_count" -ge 100 ]; then
			ret=0
			break
		fi
		sleep 0.25
	done
	return $ret
}
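
# Example usage, mirroring the call in nvmf_host_management() below: poll the
# bdevperf RPC socket until the NVMe-oF bdev has completed at least 100 reads:
#   waitforio /var/tmp/bdevperf.sock Nvme0n1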

# Add a host, start I/O, remove the host, then re-add it
function nvmf_host_management() {
	starttarget

	# Run bdevperf in the background: queue depth 64, 64 KiB I/O size, verify
	# workload, 10-second runtime
	$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "0") -q 64 -o 65536 -w verify -t 10 &
	perfpid=$!
	waitforlisten $perfpid /var/tmp/bdevperf.sock
	$rpc_py -s /var/tmp/bdevperf.sock framework_wait_init
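
	# At this point the NVMe-oF bdev should be attached inside bdevperf; as an
	# optional sanity check (not part of the original flow), it could be listed:
	#   $rpc_py -s /var/tmp/bdevperf.sock bdev_get_bdevs -b Nvme0n1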

	# Expand the trap to clean up bdevperf if something goes wrong
	trap 'process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid || true; nvmftestfini; exit 1' SIGINT SIGTERM EXIT

	waitforio /var/tmp/bdevperf.sock Nvme0n1

	# Remove the host while bdevperf is still running, then re-add it quickly. The
	# initiator may attempt to reconnect in the meantime.
	$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0
	$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0
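
	# The restored allowed-host list could be verified with, e.g.:
	#   $rpc_py nvmf_get_subsystems | jq '.[] | select(.nqn == "nqn.2016-06.io.spdk:cnode0") | .hosts'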

	sleep 1

	# TODO: Right now the NVMe-oF initiator will not correctly detect broken
	# connections and so it will never shut down. Just kill it.
	kill -9 $perfpid || true

	# Run bdevperf once more, in the foreground, to verify that the re-added host
	# can still connect and complete I/O
	$rootdir/test/bdev/bdevperf/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "0") -q 64 -o 65536 -w verify -t 1

	stoptarget
}

nvmftestinit

run_test "nvmf_host_management" nvmf_host_management

trap - SIGINT SIGTERM EXIT