#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2020 Intel Corporation
#  All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512

function starttarget() {
	# Start the target
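	# -m 0x1E runs the target on cores 1-4 (reactor core mask)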
	nvmfappstart -m 0x1E

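	# Create the transport; -u sets the I/O unit size to 8192 bytes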
	$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192

	timing_enter create_subsystem
	# Create subsystem
	rm -rf $testdir/rpcs.txt
	cat <<- EOL >> $testdir/rpcs.txt
		bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE -b Malloc0
		nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -s SPDK0
		nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 Malloc0
		nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
		nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0
	EOL
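	# Send all of the queued RPCs in a single rpc.py invocation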
	$rpc_py < $testdir/rpcs.txt
	timing_exit create_subsystem

}

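# Remove temporary files left behind by the test and shut down the target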
function stoptarget() {
	rm -f ./local-job0-0-verify.state
	rm -rf $testdir/bdevperf.conf
	rm -rf $testdir/rpcs.txt

	nvmftestfini
}

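# Poll bdev_get_iostat on the given RPC socket until the given bdev has completed
# at least 100 reads (up to ~2.5 seconds)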
function waitforio() {
	# $1 = RPC socket
	if [ -z "$1" ]; then
		exit 1
	fi
	# $2 = bdev name
	if [ -z "$2" ]; then
		exit 1
	fi
	local ret=1
	local i
	for ((i = 10; i != 0; i--)); do
		read_io_count=$($rpc_py -s $1 bdev_get_iostat -b $2 | jq -r '.bdevs[0].num_read_ops')
		# A few I/O will happen during initial examine.  So wait until at least 100 I/O
		#  have completed to know that bdevperf is really generating the I/O.
		if [ $read_io_count -ge 100 ]; then
			ret=0
			break
		fi
		sleep 0.25
	done
	return $ret
}

# Add a host, start I/O, remove host, re-add host
function nvmf_host_management() {
	starttarget

	# Run bdevperf
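	# -q 64: queue depth, -o 65536: 64KiB I/O size, -w verify: verify workload, -t 10: run for 10 seconds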
	$rootdir/build/examples/bdevperf -r /var/tmp/bdevperf.sock --json <(gen_nvmf_target_json "0") -q 64 -o 65536 -w verify -t 10 "${NO_HUGE[@]}" &
	perfpid=$!
	waitforlisten $perfpid /var/tmp/bdevperf.sock
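	# Block until bdevperf has finished initializing before issuing more RPCs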
	$rpc_py -s /var/tmp/bdevperf.sock framework_wait_init

	# Expand the trap to clean up bdevperf if something goes wrong
	trap 'process_shm --id $NVMF_APP_SHM_ID; kill -9 $perfpid || true; nvmftestfini; exit 1' SIGINT SIGTERM EXIT

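	# Confirm bdevperf is actively doing I/O to the attached namespace (bdev Nvme0n1) before changing the host list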
	waitforio /var/tmp/bdevperf.sock Nvme0n1

	# Remove the host while bdevperf is still running, then re-add it quickly. The host
	# may attempt to reconnect.
	$rpc_py nvmf_subsystem_remove_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0
	$rpc_py nvmf_subsystem_add_host nqn.2016-06.io.spdk:cnode0 nqn.2016-06.io.spdk:host0

	sleep 1

	# TODO: Right now the NVMe-oF initiator will not correctly detect broken connections
	# and so it will never shut down. Just kill it.
	kill -9 $perfpid || true

	# Since we abruptly terminate $perfpid above, we need to do some cleanup on our own.
	# In particular, we need to get rid of the cpu lock files that may potentially prevent
	# the next instance of bdevperf from running.
	# FIXME: Can't we just SIGTERM $perfpid above?
	rm -f /var/tmp/spdk_cpu_lock*

	# Run bdevperf
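	# Short foreground run to confirm the re-added host can still complete I/O against the target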
	$rootdir/build/examples/bdevperf --json <(gen_nvmf_target_json "0") -q 64 -o 65536 \
		-w verify -t 1 "${NO_HUGE[@]}"
	stoptarget
}

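# Prepare the test environment for the configured transport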
nvmftestinit

nvmf_host_management

trap - SIGINT SIGTERM EXIT