NVMF_PORT=4420
NVMF_SECOND_PORT=4421
NVMF_THIRD_PORT=4422
NVMF_IP_PREFIX="192.168.100"
NVMF_IP_LEAST_ADDR=8
NVMF_TCP_IP_ADDRESS="127.0.0.1"
NVMF_TRANSPORT_OPTS=""
NVMF_SERIAL=SPDK00000000000001
NET_TYPE=${NET_TYPE:-phy-fallback}

function build_nvmf_app_args() {
	if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
		# We assume that the test script is started with sudo
		NVMF_APP=(sudo -E -u $SUDO_USER "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" "${NVMF_APP[@]}")
	fi
	NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)

	if [ -n "$SPDK_HUGE_DIR" ]; then
		NVMF_APP+=(--huge-dir "$SPDK_HUGE_DIR")
	elif [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
		echo "In non-root test mode you have to set the SPDK_HUGE_DIR variable." >&2
		echo "For example:" >&2
		echo "sudo mkdir /mnt/spdk_hugetlbfs" >&2
		echo "sudo chown ${SUDO_USER}: /mnt/spdk_hugetlbfs" >&2
		echo "export SPDK_HUGE_DIR=/mnt/spdk_hugetlbfs" >&2
		return 1
	fi
}
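# Illustrative only (the exact binary path depends on how the caller populated NVMF_APP,
# typically via the autotest setup): with SPDK_RUN_NON_ROOT=1, SUDO_USER=tester and
# SPDK_HUGE_DIR=/mnt/spdk_hugetlbfs the resulting target invocation would resemble:
#   sudo -E -u tester LD_LIBRARY_PATH=... nvmf_tgt -i 0 -e 0xFFFF --huge-dir /mnt/spdk_hugetlbfs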

source "$rootdir/scripts/common.sh"

: ${NVMF_APP_SHM_ID="0"}
export NVMF_APP_SHM_ID
build_nvmf_app_args

have_pci_nics=0

function rxe_cfg() {
	"$rootdir/scripts/rxe_cfg_small.sh" "$@"
}

function load_ib_rdma_modules() {
	if [ $(uname) != Linux ]; then
		return 0
	fi

	modprobe ib_cm
	modprobe ib_core
	# Newer kernels do not have the ib_ucm module
	modprobe ib_ucm || true
	modprobe ib_umad
	modprobe ib_uverbs
	modprobe iw_cm
	modprobe rdma_cm
	modprobe rdma_ucm
}

function allocate_nic_ips() {
	((count = NVMF_IP_LEAST_ADDR))
	for nic_name in $(get_rdma_if_list); do
		ip="$(get_ip_address $nic_name)"
		if [[ -z $ip ]]; then
			ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
			ip link set $nic_name up
			((count = count + 1))
		fi
		# dump configuration for debug log
		ip addr show $nic_name
	done
}
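# Example: with the defaults above (NVMF_IP_PREFIX=192.168.100, NVMF_IP_LEAST_ADDR=8),
# the first RDMA interface without an address gets 192.168.100.8/24, the next one
# 192.168.100.9/24, and so on; interfaces that already have an address are left untouched.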

function get_available_rdma_ips() {
	for nic_name in $(get_rdma_if_list); do
		get_ip_address $nic_name
	done
}

function get_rdma_if_list() {
	local net_dev rxe_net_dev rxe_net_devs

	mapfile -t rxe_net_devs < <(rxe_cfg rxe-net)

	if ((${#net_devs[@]} == 0)); then
		return 1
	fi

	# Pick only those devices which were found during the gather_supported_nvmf_pci_devs() run
	for net_dev in "${net_devs[@]}"; do
		for rxe_net_dev in "${rxe_net_devs[@]}"; do
			if [[ $net_dev == "$rxe_net_dev" ]]; then
				echo "$net_dev"
				continue 2
			fi
		done
	done
}

function get_ip_address() {
	interface=$1
	ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1
}
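# Usage example: "get_ip_address nvmf_init_if" prints the first IPv4 address assigned to
# that interface (e.g. 10.0.0.1 after nvmf_veth_init()), or nothing if none is assigned.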

function nvmfcleanup() {
	sync

	if [ "$TEST_TRANSPORT" == "tcp" ] || [ "$TEST_TRANSPORT" == "rdma" ]; then
		set +e
		for i in {1..20}; do
			modprobe -v -r nvme-$TEST_TRANSPORT
			if modprobe -v -r nvme-fabrics; then
				set -e
				return 0
			fi
			sleep 1
		done
		set -e

		# So far unable to remove the kernel modules. Try
		# one more time and let it fail.
		# Allow the transport module to fail for now. See Jim's comment
		# about the nvme-tcp module below.
		modprobe -v -r nvme-$TEST_TRANSPORT || true
		modprobe -v -r nvme-fabrics
	fi
}

function nvmf_veth_init() {
	NVMF_INITIATOR_IP=10.0.0.1
	NVMF_FIRST_TARGET_IP=10.0.0.2
	NVMF_SECOND_TARGET_IP=10.0.0.3
	NVMF_BRIDGE="nvmf_br"
	NVMF_INITIATOR_INTERFACE="nvmf_init_if"
	NVMF_INITIATOR_BRIDGE="nvmf_init_br"
	NVMF_TARGET_NAMESPACE="nvmf_tgt_ns_spdk"
	NVMF_TARGET_NS_CMD=(ip netns exec "$NVMF_TARGET_NAMESPACE")
	NVMF_TARGET_INTERFACE="nvmf_tgt_if"
	NVMF_TARGET_INTERFACE2="nvmf_tgt_if2"
	NVMF_TARGET_BRIDGE="nvmf_tgt_br"
	NVMF_TARGET_BRIDGE2="nvmf_tgt_br2"

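	# Remove any leftovers from a previous run; ignore errors since the
	# interfaces, bridge and namespace may not exist yet.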
	ip link set $NVMF_INITIATOR_BRIDGE nomaster || true
	ip link set $NVMF_TARGET_BRIDGE nomaster || true
	ip link set $NVMF_TARGET_BRIDGE2 nomaster || true
	ip link set $NVMF_INITIATOR_BRIDGE down || true
	ip link set $NVMF_TARGET_BRIDGE down || true
	ip link set $NVMF_TARGET_BRIDGE2 down || true
	ip link delete $NVMF_BRIDGE type bridge || true
	ip link delete $NVMF_INITIATOR_INTERFACE || true
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE || true
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE2 || true

	# Create network namespace
	ip netns add $NVMF_TARGET_NAMESPACE

	# Create veth (Virtual ethernet) interface pairs
	ip link add $NVMF_INITIATOR_INTERFACE type veth peer name $NVMF_INITIATOR_BRIDGE
	ip link add $NVMF_TARGET_INTERFACE type veth peer name $NVMF_TARGET_BRIDGE
	ip link add $NVMF_TARGET_INTERFACE2 type veth peer name $NVMF_TARGET_BRIDGE2

	# Associate veth interface pairs with network namespace
	ip link set $NVMF_TARGET_INTERFACE netns $NVMF_TARGET_NAMESPACE
	ip link set $NVMF_TARGET_INTERFACE2 netns $NVMF_TARGET_NAMESPACE

	# Allocate IP addresses
	ip addr add $NVMF_INITIATOR_IP/24 dev $NVMF_INITIATOR_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip addr add $NVMF_FIRST_TARGET_IP/24 dev $NVMF_TARGET_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip addr add $NVMF_SECOND_TARGET_IP/24 dev $NVMF_TARGET_INTERFACE2

	# Link up veth interfaces
	ip link set $NVMF_INITIATOR_INTERFACE up
	ip link set $NVMF_INITIATOR_BRIDGE up
	ip link set $NVMF_TARGET_BRIDGE up
	ip link set $NVMF_TARGET_BRIDGE2 up
	"${NVMF_TARGET_NS_CMD[@]}" ip link set $NVMF_TARGET_INTERFACE up
	"${NVMF_TARGET_NS_CMD[@]}" ip link set $NVMF_TARGET_INTERFACE2 up
	"${NVMF_TARGET_NS_CMD[@]}" ip link set lo up

	# Create a bridge
	ip link add $NVMF_BRIDGE type bridge
	ip link set $NVMF_BRIDGE up

	# Add veth interfaces to the bridge
	ip link set $NVMF_INITIATOR_BRIDGE master $NVMF_BRIDGE
	ip link set $NVMF_TARGET_BRIDGE master $NVMF_BRIDGE
	ip link set $NVMF_TARGET_BRIDGE2 master $NVMF_BRIDGE

	# Accept connections from veth interface
	iptables -I INPUT 1 -i $NVMF_INITIATOR_INTERFACE -p tcp --dport $NVMF_PORT -j ACCEPT
	iptables -A FORWARD -i $NVMF_BRIDGE -o $NVMF_BRIDGE -j ACCEPT

	# Verify connectivity
	ping -c 1 $NVMF_FIRST_TARGET_IP
	ping -c 1 $NVMF_SECOND_TARGET_IP
	"${NVMF_TARGET_NS_CMD[@]}" ping -c 1 $NVMF_INITIATOR_IP

	NVMF_APP=("${NVMF_TARGET_NS_CMD[@]}" "${NVMF_APP[@]}")
}
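# Summary of the topology nvmf_veth_init() builds (all addresses /24):
#   nvmf_init_if (10.0.0.1, default netns) <-veth-> nvmf_init_br  -\
#   nvmf_tgt_if  (10.0.0.2, target netns)  <-veth-> nvmf_tgt_br   --+-- nvmf_br (bridge, default netns)
#   nvmf_tgt_if2 (10.0.0.3, target netns)  <-veth-> nvmf_tgt_br2  -/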

function nvmf_veth_fini() {
	# Cleanup bridge, veth interfaces, and network namespace
	# Note: removing one veth removes the pair
	ip link set $NVMF_INITIATOR_BRIDGE nomaster
	ip link set $NVMF_TARGET_BRIDGE nomaster
	ip link set $NVMF_TARGET_BRIDGE2 nomaster
	ip link set $NVMF_INITIATOR_BRIDGE down
	ip link set $NVMF_TARGET_BRIDGE down
	ip link set $NVMF_TARGET_BRIDGE2 down
	ip link delete $NVMF_BRIDGE type bridge
	ip link delete $NVMF_INITIATOR_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE2
	remove_spdk_ns
}

function nvmf_tcp_init() {
	NVMF_INITIATOR_IP=10.0.0.1
	NVMF_FIRST_TARGET_IP=10.0.0.2
	TCP_INTERFACE_LIST=("${net_devs[@]}")

	# We need at least two net devs
	((${#TCP_INTERFACE_LIST[@]} > 1))

	NVMF_TARGET_INTERFACE=${TCP_INTERFACE_LIST[0]}
	NVMF_INITIATOR_INTERFACE=${TCP_INTERFACE_LIST[1]}

	# Skip the nvmf_multipath case in nvmf_tcp_init(); it is covered by nvmf_veth_init().
	NVMF_SECOND_TARGET_IP=""

	NVMF_TARGET_NAMESPACE="${NVMF_TARGET_INTERFACE}_ns_spdk"
	NVMF_TARGET_NS_CMD=(ip netns exec "$NVMF_TARGET_NAMESPACE")
	ip -4 addr flush $NVMF_TARGET_INTERFACE || true
	ip -4 addr flush $NVMF_INITIATOR_INTERFACE || true

	# Create network namespace
	ip netns add $NVMF_TARGET_NAMESPACE

	# Associate the target phy interface with the network namespace
	ip link set $NVMF_TARGET_INTERFACE netns $NVMF_TARGET_NAMESPACE

	# Allocate IP addresses
	ip addr add $NVMF_INITIATOR_IP/24 dev $NVMF_INITIATOR_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip addr add $NVMF_FIRST_TARGET_IP/24 dev $NVMF_TARGET_INTERFACE

	# Link up phy interfaces
	ip link set $NVMF_INITIATOR_INTERFACE up

	"${NVMF_TARGET_NS_CMD[@]}" ip link set $NVMF_TARGET_INTERFACE up
	"${NVMF_TARGET_NS_CMD[@]}" ip link set lo up

	# Accept connections from the phy interface
	iptables -I INPUT 1 -i $NVMF_INITIATOR_INTERFACE -p tcp --dport $NVMF_PORT -j ACCEPT

	# Verify connectivity
	ping -c 1 $NVMF_FIRST_TARGET_IP
	"${NVMF_TARGET_NS_CMD[@]}" ping -c 1 $NVMF_INITIATOR_IP

	NVMF_APP=("${NVMF_TARGET_NS_CMD[@]}" "${NVMF_APP[@]}")
}
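# Example (hypothetical interface names): with net_devs=(ens801f0 ens801f1),
# nvmf_tcp_init() moves ens801f0 into the "ens801f0_ns_spdk" namespace as the target
# interface (10.0.0.2/24) and leaves ens801f1 in the default namespace as the
# initiator interface (10.0.0.1/24).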

function nvmf_tcp_fini() {
	if [[ "$NVMF_TARGET_NAMESPACE" == "nvmf_tgt_ns_spdk" ]]; then
		nvmf_veth_fini
		return 0
	fi
	remove_spdk_ns
	ip -4 addr flush $NVMF_INITIATOR_INTERFACE || :
}

function gather_supported_nvmf_pci_devs() {
	# Go through the entire pci bus and gather all ethernet controllers we support for the nvmf tests.
	# Focus on the hardware that's currently being tested by the CI.
	xtrace_disable
	cache_pci_bus_sysfs
	xtrace_restore

	local intel=0x8086 mellanox=0x15b3 pci

	local -a pci_devs=()
	local -a pci_net_devs=()
	local -A pci_drivers=()

	local -ga net_devs=()
	local -ga e810=()
	local -ga x722=()
	local -ga mlx=()

	# E810-XXV
	e810+=(${pci_bus_cache["$intel:0x1592"]})
	e810+=(${pci_bus_cache["$intel:0x159b"]})
	# X722 10G
	x722+=(${pci_bus_cache["$intel:0x37d2"]})
	# ConnectX-5
	mlx+=(${pci_bus_cache["$mellanox:0x1017"]})
	# ConnectX-4
	mlx+=(${pci_bus_cache["$mellanox:0x1015"]})
	mlx+=(${pci_bus_cache["$mellanox:0x1013"]})

	pci_devs+=("${e810[@]}")
	if [[ $TEST_TRANSPORT == rdma ]]; then
		pci_devs+=("${x722[@]}")
		pci_devs+=("${mlx[@]}")
	fi

	# Try to respect what the CI wants to test and override pci_devs[]
	if [[ $SPDK_TEST_NVMF_NICS == mlx5 ]]; then
		pci_devs=("${mlx[@]}")
	elif [[ $SPDK_TEST_NVMF_NICS == e810 ]]; then
		pci_devs=("${e810[@]}")
	elif [[ $SPDK_TEST_NVMF_NICS == x722 ]]; then
		pci_devs=("${x722[@]}")
	fi

	if ((${#pci_devs[@]} == 0)); then
		return 1
	fi

	# Load proper kernel modules if necessary
	for pci in "${pci_devs[@]}"; do
		echo "Found $pci (${pci_ids_vendor["$pci"]} - ${pci_ids_device["$pci"]})"
		if [[ ${pci_mod_resolved["$pci"]} == unknown ]]; then
			echo "Unresolved modalias for $pci (${pci_mod_driver["$pci"]}). Driver not installed|builtin?"
			continue
		fi
		if [[ ${pci_bus_driver["$pci"]} == unbound ]]; then
			echo "$pci not bound, needs ${pci_mod_resolved["$pci"]}"
			pci_drivers["${pci_mod_resolved["$pci"]}"]=1
		fi
	done

	if ((${#pci_drivers[@]} > 0)); then
		echo "Loading kernel modules: ${!pci_drivers[*]}"
		modprobe -a "${!pci_drivers[@]}"
	fi

	# E810 cards also need the irdma driver to be around.
	if ((${#e810[@]} > 0)) && [[ $TEST_TRANSPORT == rdma ]]; then
		if [[ -e /sys/module/irdma/parameters/roce_ena ]]; then
			# Our tests don't play well with the iWARP protocol. Make sure we use RoCEv2 instead.
			(($(< /sys/module/irdma/parameters/roce_ena) != 1)) && modprobe -r irdma
		fi
		modinfo irdma && modprobe irdma roce_ena=1
	fi > /dev/null

	# All devices detected and kernel modules loaded. Now look under the net class to see if there
	# are any net devices bound to the controllers.
	for pci in "${pci_devs[@]}"; do
		if [[ ! -e /sys/bus/pci/devices/$pci/net ]]; then
			echo "No net devices associated with $pci"
			continue
		fi
		pci_net_devs=("/sys/bus/pci/devices/$pci/net/"*)
		pci_net_devs=("${pci_net_devs[@]##*/}")
		echo "Found net devices under $pci: ${pci_net_devs[*]}"
		net_devs+=("${pci_net_devs[@]}")
	done

	if ((${#net_devs[@]} == 0)); then
		return 1
	fi
}
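# Illustrative invocation: a CI job that should only exercise Mellanox NICs over RDMA
# would typically export the following before running a test that sources this file:
#   SPDK_TEST_NVMF_NICS=mlx5 TEST_TRANSPORT=rdma
# which makes gather_supported_nvmf_pci_devs() restrict pci_devs[] to the ConnectX entries.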

prepare_net_devs() {
	local -g is_hw=no

	remove_spdk_ns

	[[ $NET_TYPE != virt ]] && gather_supported_nvmf_pci_devs && is_hw=yes

	if [[ $is_hw == yes ]]; then
		if [[ $TEST_TRANSPORT == tcp ]]; then
			nvmf_tcp_init
		elif [[ $TEST_TRANSPORT == rdma ]]; then
			rdma_device_init
		fi
		return 0
	elif [[ $NET_TYPE == phy ]]; then
		echo "ERROR: No supported devices were found, cannot run the $TEST_TRANSPORT test"
		return 1
	elif [[ $NET_TYPE == phy-fallback ]]; then
		echo "WARNING: No supported devices were found, fallback requested for $TEST_TRANSPORT test"
	fi

	# NET_TYPE == virt or phy-fallback
	if [[ $TEST_TRANSPORT == tcp ]]; then
		nvmf_veth_init
		return 0
	fi

	echo "ERROR: virt and fallback setup is not supported for $TEST_TRANSPORT"
	return 1
}
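# Summary of prepare_net_devs() behavior per NET_TYPE:
#   phy          - require supported physical NICs; fail the test if none are found
#   phy-fallback - prefer physical NICs, otherwise fall back to the veth setup (tcp only)
#   virt         - always use the veth setup (tcp only)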

function nvmftestinit() {
	if [ -z $TEST_TRANSPORT ]; then
		echo "transport not specified - use --transport= to specify"
		return 1
	fi

	trap 'nvmftestfini' SIGINT SIGTERM EXIT

	prepare_net_devs

	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh
	fi

	NVMF_TRANSPORT_OPTS="-t $TEST_TRANSPORT"
	if [[ "$TEST_TRANSPORT" == "rdma" ]]; then
		RDMA_IP_LIST=$(get_available_rdma_ips)
		NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
		NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | tail -n +2 | head -n 1)
		if [ -z $NVMF_FIRST_TARGET_IP ]; then
			echo "no RDMA NIC for nvmf test"
			exit 1
		fi
	elif [[ "$TEST_TRANSPORT" == "tcp" ]]; then
		NVMF_TRANSPORT_OPTS="$NVMF_TRANSPORT_OPTS -o"
	fi

	if [ "$TEST_TRANSPORT" == "tcp" ] || [ "$TEST_TRANSPORT" == "rdma" ]; then
		# currently we run the host/perf test for TCP even on systems without kernel nvme-tcp
		#  support; that's fine since the host/perf test uses the SPDK initiator
		# maybe later we will enforce modprobe to succeed once we have systems in the test pool
		#  with nvme-tcp kernel support - but until then let this pass so we can still run the
		#  host/perf test with the tcp transport
		modprobe nvme-$TEST_TRANSPORT || true
	fi
}

function nvmfappstart() {
	timing_enter start_nvmf_tgt
	"${NVMF_APP[@]}" "$@" &
	nvmfpid=$!
	waitforlisten $nvmfpid
	timing_exit start_nvmf_tgt
	trap 'process_shm --id $NVMF_APP_SHM_ID || :; nvmftestfini' SIGINT SIGTERM EXIT
}
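# Typical usage (illustrative core mask): start the target pinned to four cores and wait
# for it to start listening before issuing any RPCs:
#   nvmfappstart -m 0xF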

function nvmftestfini() {
	nvmfcleanup || :
	if [ -n "$nvmfpid" ]; then
		killprocess $nvmfpid
	fi
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh reset
	fi
	if [[ "$TEST_TRANSPORT" == "tcp" ]]; then
		nvmf_tcp_fini
	fi
}

function rdma_device_init() {
	load_ib_rdma_modules
	allocate_nic_ips
}

function nvme_connect() {
	local init_count
	init_count=$(nvme list | wc -l)

	if ! nvme connect "$@"; then return $?; fi

	for i in $(seq 1 10); do
		if [ $(nvme list | wc -l) -gt $init_count ]; then
			return 0
		else
			sleep 1s
		fi
	done
	return 1
}
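# Usage example (illustrative NQN): connect the kernel initiator to an SPDK subsystem and
# wait for the new namespace to show up in "nvme list":
#   nvme_connect -t "$TEST_TRANSPORT" -n "nqn.2016-06.io.spdk:cnode1" \
#     -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"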

function get_nvme_devs() {
	local dev _

	while read -r dev _; do
		if [[ $dev == /dev/nvme* ]]; then
			echo "$dev"
		fi
	done < <(nvme list)
}

function gen_nvmf_target_json() {
	local subsystem config=()

	for subsystem in "${@:-1}"; do
		config+=(
			"$(
				cat <<- EOF
					{
					  "params": {
					    "name": "Nvme$subsystem",
					    "trtype": "$TEST_TRANSPORT",
					    "traddr": "$NVMF_FIRST_TARGET_IP",
					    "adrfam": "ipv4",
					    "trsvcid": "$NVMF_PORT",
					    "subnqn": "nqn.2016-06.io.spdk:cnode$subsystem",
					    "hostnqn": "nqn.2016-06.io.spdk:host$subsystem",
					    "hdgst": ${hdgst:-false},
					    "ddgst": ${ddgst:-false}
					  },
					  "method": "bdev_nvme_attach_controller"
					}
				EOF
			)"
		)
	done
	jq . <<- JSON
		{
		  "subsystems": [
		    {
		      "subsystem": "bdev",
		      "config": [
			{
			  "method": "bdev_nvme_set_options",
			  "params": {
				"action_on_timeout": "none",
				"timeout_us": 0,
				"retry_count": 4,
				"arbitration_burst": 0,
				"low_priority_weight": 0,
				"medium_priority_weight": 0,
				"high_priority_weight": 0,
				"nvme_adminq_poll_period_us": 10000,
				"keep_alive_timeout_ms": 10000,
				"nvme_ioq_poll_period_us": 0,
				"io_queue_requests": 0,
				"delay_cmd_submit": true
			  }
			},
		        $(
		IFS=","
		printf '%s\n' "${config[*]}"
		),
			{
			  "method": "bdev_wait_for_examine"
			}
		      ]
		    }
		  ]
		}
	JSON
}
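# Usage example (illustrative consumer and options): generate a bdev config with two
# NVMe-oF controllers (Nvme0, Nvme1) and feed it to an SPDK app that accepts --json:
#   gen_nvmf_target_json 0 1 > /tmp/bdev.json
#   "$rootdir/test/bdev/bdevperf/bdevperf" --json /tmp/bdev.json -q 128 -o 4096 -w verify -t 1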

function remove_spdk_ns() {
	local ns
	while read -r ns _; do
		[[ $ns == *_spdk ]] || continue
		ip netns delete "$ns"
	done < <(ip netns list)
	# Let it settle
	sleep 1
}

configure_kernel_target() {
	# Keep it global in scope for easier cleanup
	kernel_name=${1:-kernel_target}
	nvmet=/sys/kernel/config/nvmet
	kernel_subsystem=$nvmet/subsystems/$kernel_name
	kernel_namespace=$kernel_subsystem/namespaces/1
	kernel_port=$nvmet/ports/1

	local block nvme

	if [[ ! -e /sys/module/nvmet ]]; then
		modprobe nvmet
	fi

	[[ -e $nvmet ]]

	"$rootdir/scripts/setup.sh" reset

	# Find an nvme device with an active ns
	for block in /sys/block/nvme*; do
		[[ -e $block ]] || continue
		block_in_use "${block##*/}" || nvme="/dev/${block##*/}"
	done

	[[ -b $nvme ]]

	mkdir "$kernel_subsystem"
	mkdir "$kernel_namespace"
	mkdir "$kernel_port"

	# attr_serial accepts only a %llx value and for some reason the kernel swaps the
	# byte order, so setting the serial is not very useful here.
	# "$kernel_subsystem/attr_serial"
	echo "SPDK-$kernel_name" > "$kernel_subsystem/attr_model"

	echo 1 > "$kernel_subsystem/attr_allow_any_host"
	echo "$nvme" > "$kernel_namespace/device_path"
	echo 1 > "$kernel_namespace/enable"

	# By default use the initiator IP which was set by nvmftestinit(). This is the
	# interface which resides in the main net namespace and which is visible
	# to nvmet.

	echo "$NVMF_INITIATOR_IP" > "$kernel_port/addr_traddr"
	echo "$TEST_TRANSPORT" > "$kernel_port/addr_trtype"
	echo "$NVMF_PORT" > "$kernel_port/addr_trsvcid"
	echo ipv4 > "$kernel_port/addr_adrfam"

	# Enable the listener by linking the port to the previously created subsystem
	ln -s "$kernel_subsystem" "$kernel_port/subsystems/"

	# Check if the target is available
	nvme discover -a "$NVMF_INITIATOR_IP" -t "$TEST_TRANSPORT" -s "$NVMF_PORT"
}
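# Example follow-up (illustrative): once configure_kernel_target has linked the port,
# a host can connect to the exported namespace with the subsystem NQN equal to the
# configfs directory name ($kernel_name, "kernel_target" by default):
#   nvme connect -t "$TEST_TRANSPORT" -a "$NVMF_INITIATOR_IP" -s "$NVMF_PORT" -n "$kernel_name"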

clean_kernel_target() {
	[[ -e $kernel_subsystem ]] || return 0

	echo 0 > "$kernel_namespace/enable"

	rm -f "$kernel_port/subsystems/$kernel_name"
	rmdir "$kernel_namespace"
	rmdir "$kernel_port"
	rmdir "$kernel_subsystem"

	modules=(/sys/module/nvmet/holders/*)

	modprobe -r "${modules[@]##*/}" nvmet
}
624