# xref: /spdk/test/nvmf/common.sh (revision b30d57cdad6d2bc75cc1e4e2ebbcebcb0d98dcfa)
NVMF_PORT=4420
NVMF_SECOND_PORT=4421
NVMF_THIRD_PORT=4422
NVMF_IP_PREFIX="192.168.100"
NVMF_IP_LEAST_ADDR=8
NVMF_TCP_IP_ADDRESS="127.0.0.1"
NVMF_TRANSPORT_OPTS=""
NVMF_SERIAL=SPDK00000000000001

function build_nvmf_app_args() {
	if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
		# Run the target as the unprivileged test user instead of root.
		NVMF_APP=(sudo -u "$USER" "${NVMF_APP[@]}")
	fi
	NVMF_APP+=(-i "$NVMF_APP_SHM_ID" -e 0xFFFF)
}
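# Illustrative only: assuming NVMF_APP was initialized elsewhere (typically in
# autotest_common.sh) to the SPDK nvmf_tgt binary, the function above leaves it
# looking roughly like one of:
#   sudo -u $USER nvmf_tgt -i $NVMF_APP_SHM_ID -e 0xFFFF   # SPDK_RUN_NON_ROOT=1
#   nvmf_tgt -i $NVMF_APP_SHM_ID -e 0xFFFF                 # running as root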

: ${NVMF_APP_SHM_ID="0"}
export NVMF_APP_SHM_ID
build_nvmf_app_args

have_pci_nics=0

function rxe_cfg() {
	"$rootdir/scripts/rxe_cfg_small.sh" "$@"
}

function load_ib_rdma_modules() {
	if [ $(uname) != Linux ]; then
		return 0
	fi

	modprobe ib_cm
	modprobe ib_core
	# Newer kernels do not have the ib_ucm module
	modprobe ib_ucm || true
	modprobe ib_umad
	modprobe ib_uverbs
	modprobe iw_cm
	modprobe rdma_cm
	modprobe rdma_ucm
}

function detect_soft_roce_nics() {
	rxe_cfg stop # make sure we run tests with a clean slate
	rxe_cfg start
}

# Args 1 and 2 are the grep filters for finding our NICs.
# Subsequent args are all drivers that should be loaded if we find these NICs.
# Those drivers should be supplied in the correct order.
function detect_nics_and_probe_drivers() {
	NIC_VENDOR="$1"
	NIC_CLASS="$2"

	nvmf_nic_bdfs=$(lspci | grep Ethernet | grep "$NIC_VENDOR" | grep "$NIC_CLASS" | awk -F ' ' '{print "0000:"$1}')

	if [ -z "$nvmf_nic_bdfs" ]; then
		return 0
	fi

	have_pci_nics=1
	if [ $# -ge 2 ]; then
		# Shift out the first two positional arguments.
		shift 2
		# Iterate through the remaining arguments (the driver names).
		for i; do
			modprobe "$i"
		done
	fi
}

function detect_pci_nics() {

	if ! hash lspci; then
		return 0
	fi

	detect_nics_and_probe_drivers "Mellanox" "ConnectX-4" "mlx4_core" "mlx4_ib" "mlx4_en"
	detect_nics_and_probe_drivers "Mellanox" "ConnectX-5" "mlx5_core" "mlx5_ib"
	detect_nics_and_probe_drivers "Intel" "X722" "i40e" "i40iw"
	detect_nics_and_probe_drivers "Chelsio" "Unified Wire" "cxgb4" "iw_cxgb4"

	if [ "$have_pci_nics" -eq "0" ]; then
		return 0
	fi

	# Provide time for drivers to properly load.
	sleep 5
}

function detect_rdma_nics() {
	detect_pci_nics
	if [ "$have_pci_nics" -eq "0" ]; then
		detect_soft_roce_nics
	fi
}

function allocate_nic_ips() {
	((count = NVMF_IP_LEAST_ADDR))
	for nic_name in $(get_rdma_if_list); do
		ip="$(get_ip_address $nic_name)"
		if [[ -z $ip ]]; then
			ip addr add $NVMF_IP_PREFIX.$count/24 dev $nic_name
			ip link set $nic_name up
			((count = count + 1))
		fi
		# Dump configuration for the debug log.
		ip addr show $nic_name
	done
}
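# Illustrative result: with the defaults above (NVMF_IP_PREFIX=192.168.100,
# NVMF_IP_LEAST_ADDR=8), the first RDMA interface that has no address is given
# 192.168.100.8/24, the next one 192.168.100.9/24, and so on.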

function get_available_rdma_ips() {
	for nic_name in $(get_rdma_if_list); do
		get_ip_address $nic_name
	done
}

function get_rdma_if_list() {
	for nic_type in /sys/class/infiniband/*; do
		[[ -e "$nic_type" ]] || break
		for nic_name in /sys/class/infiniband/"$(basename ${nic_type})"/device/net/*; do
			[[ -e "$nic_name" ]] || break
			basename "$nic_name"
		done
	done
}
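# Illustrative output (names are examples only): an mlx5 device exposing
# /sys/class/infiniband/mlx5_0/device/net/ens2f0 makes this print "ens2f0",
# i.e. one network interface name per line for every RDMA-capable device.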

function get_ip_address() {
	interface=$1
	ip -o -4 addr show $interface | awk '{print $4}' | cut -d"/" -f1
}

function nvmfcleanup() {
	sync
	set +e
	for i in {1..20}; do
		modprobe -v -r nvme-$TEST_TRANSPORT
		if modprobe -v -r nvme-fabrics; then
			set -e
			return 0
		fi
		sleep 1
	done
	set -e

	# The modules could not be removed in the loop above. Try one more time and
	# let the nvme-fabrics removal fail the script if it still does not work.
	# The transport module is allowed to fail for now - see the comment about
	# the nvme-tcp module in nvmftestinit below.
	modprobe -v -r nvme-$TEST_TRANSPORT || true
	modprobe -v -r nvme-fabrics
}

function nvmf_veth_init() {
	NVMF_INITIATOR_IP=10.0.0.1
	NVMF_FIRST_TARGET_IP=10.0.0.2
	NVMF_SECOND_TARGET_IP=10.0.0.3
	NVMF_BRIDGE="nvmf_br"
	NVMF_INITIATOR_INTERFACE="nvmf_init_if"
	NVMF_INITIATOR_BRIDGE="nvmf_init_br"
	NVMF_TARGET_NAMESPACE="nvmf_tgt_ns"
	NVMF_TARGET_NS_CMD=(ip netns exec "$NVMF_TARGET_NAMESPACE")
	NVMF_TARGET_INTERFACE="nvmf_tgt_if"
	NVMF_TARGET_INTERFACE2="nvmf_tgt_if2"
	NVMF_TARGET_BRIDGE="nvmf_tgt_br"
	NVMF_TARGET_BRIDGE2="nvmf_tgt_br2"

	# Remove any interfaces, bridge and namespace left over from a previous run.
	ip link set $NVMF_INITIATOR_BRIDGE nomaster || true
	ip link set $NVMF_TARGET_BRIDGE nomaster || true
	ip link set $NVMF_TARGET_BRIDGE2 nomaster || true
	ip link set $NVMF_INITIATOR_BRIDGE down || true
	ip link set $NVMF_TARGET_BRIDGE down || true
	ip link set $NVMF_TARGET_BRIDGE2 down || true
	ip link delete $NVMF_BRIDGE type bridge || true
	ip link delete $NVMF_INITIATOR_INTERFACE || true
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE || true
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE2 || true
	ip netns del $NVMF_TARGET_NAMESPACE || true

	trap 'nvmf_veth_fini; exit 1' SIGINT SIGTERM EXIT

	# Create network namespace
	ip netns add $NVMF_TARGET_NAMESPACE

	# Create veth (virtual ethernet) interface pairs
	ip link add $NVMF_INITIATOR_INTERFACE type veth peer name $NVMF_INITIATOR_BRIDGE
	ip link add $NVMF_TARGET_INTERFACE type veth peer name $NVMF_TARGET_BRIDGE
	ip link add $NVMF_TARGET_INTERFACE2 type veth peer name $NVMF_TARGET_BRIDGE2

	# Associate veth interface pairs with network namespace
	ip link set $NVMF_TARGET_INTERFACE netns $NVMF_TARGET_NAMESPACE
	ip link set $NVMF_TARGET_INTERFACE2 netns $NVMF_TARGET_NAMESPACE

	# Allocate IP addresses
	ip addr add $NVMF_INITIATOR_IP/24 dev $NVMF_INITIATOR_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip addr add $NVMF_FIRST_TARGET_IP/24 dev $NVMF_TARGET_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip addr add $NVMF_SECOND_TARGET_IP/24 dev $NVMF_TARGET_INTERFACE2

	# Bring up the veth interfaces
	ip link set $NVMF_INITIATOR_INTERFACE up
	ip link set $NVMF_INITIATOR_BRIDGE up
	ip link set $NVMF_TARGET_BRIDGE up
	ip link set $NVMF_TARGET_BRIDGE2 up
	"${NVMF_TARGET_NS_CMD[@]}" ip link set $NVMF_TARGET_INTERFACE up
	"${NVMF_TARGET_NS_CMD[@]}" ip link set $NVMF_TARGET_INTERFACE2 up
	"${NVMF_TARGET_NS_CMD[@]}" ip link set lo up

	# Create a bridge
	ip link add $NVMF_BRIDGE type bridge
	ip link set $NVMF_BRIDGE up

	# Add veth interfaces to the bridge
	ip link set $NVMF_INITIATOR_BRIDGE master $NVMF_BRIDGE
	ip link set $NVMF_TARGET_BRIDGE master $NVMF_BRIDGE
	ip link set $NVMF_TARGET_BRIDGE2 master $NVMF_BRIDGE

	# Accept connections from the initiator veth interface
	iptables -I INPUT 1 -i $NVMF_INITIATOR_INTERFACE -p tcp --dport $NVMF_PORT -j ACCEPT

	# Verify connectivity
	ping -c 1 $NVMF_FIRST_TARGET_IP
	ping -c 1 $NVMF_SECOND_TARGET_IP
	"${NVMF_TARGET_NS_CMD[@]}" ping -c 1 $NVMF_INITIATOR_IP

	# Run the target app inside the target namespace.
	NVMF_APP=("${NVMF_TARGET_NS_CMD[@]}" "${NVMF_APP[@]}")
}
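# Resulting topology (names and addresses from the variables above): all three
# bridge-side veth ends are enslaved to the nvmf_br bridge, while their peers
# sit on either side of the namespace boundary:
#   nvmf_init_if (10.0.0.1, default netns)  <-veth->  nvmf_init_br -- nvmf_br
#   nvmf_tgt_if  (10.0.0.2, nvmf_tgt_ns)    <-veth->  nvmf_tgt_br  -- nvmf_br
#   nvmf_tgt_if2 (10.0.0.3, nvmf_tgt_ns)    <-veth->  nvmf_tgt_br2 -- nvmf_br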

function nvmf_veth_fini() {
	# Cleanup bridge, veth interfaces, and network namespace
	# Note: removing one veth removes the pair
	ip link set $NVMF_INITIATOR_BRIDGE nomaster
	ip link set $NVMF_TARGET_BRIDGE nomaster
	ip link set $NVMF_TARGET_BRIDGE2 nomaster
	ip link set $NVMF_INITIATOR_BRIDGE down
	ip link set $NVMF_TARGET_BRIDGE down
	ip link set $NVMF_TARGET_BRIDGE2 down
	ip link delete $NVMF_BRIDGE type bridge
	ip link delete $NVMF_INITIATOR_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE
	"${NVMF_TARGET_NS_CMD[@]}" ip link delete $NVMF_TARGET_INTERFACE2
	ip netns del $NVMF_TARGET_NAMESPACE
}

function nvmftestinit() {
	if [ -z "$TEST_TRANSPORT" ]; then
		echo "transport not specified - use --transport= to specify"
		return 1
	fi
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh
		if [ "$TEST_TRANSPORT" == "rdma" ]; then
			rdma_device_init
		fi
	fi

	NVMF_TRANSPORT_OPTS="-t $TEST_TRANSPORT"
	if [ "$TEST_TRANSPORT" == "rdma" ]; then
		RDMA_IP_LIST=$(get_available_rdma_ips)
		NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
		NVMF_SECOND_TARGET_IP=$(echo "$RDMA_IP_LIST" | tail -n +2 | head -n 1)
		if [ -z "$NVMF_FIRST_TARGET_IP" ]; then
			echo "no NIC for nvmf test"
			exit 0
		fi
	elif [ "$TEST_TRANSPORT" == "tcp" ]; then
		nvmf_veth_init
		NVMF_TRANSPORT_OPTS="$NVMF_TRANSPORT_OPTS -o"
	fi

	# We currently run the host/perf test for TCP even on systems without kernel
	# nvme-tcp support; that is fine since the host/perf test uses the SPDK
	# initiator. We may enforce that this modprobe succeeds once the test pool
	# has systems with nvme-tcp kernel support - but until then let it pass so
	# we can still run the host/perf test with the tcp transport.
	modprobe nvme-$TEST_TRANSPORT || true
}
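# After nvmftestinit, NVMF_TRANSPORT_OPTS holds the transport arguments the
# individual test scripts typically pass to "rpc.py nvmf_create_transport",
# e.g. "-t rdma" for RDMA or "-t tcp -o" for TCP.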

function nvmfappstart() {
	timing_enter start_nvmf_tgt
	"${NVMF_APP[@]}" "$@" &
	nvmfpid=$!
	trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $nvmfpid
	timing_exit start_nvmf_tgt
}

function nvmftestfini() {
	nvmfcleanup || :
	if [ -n "$nvmfpid" ]; then
		killprocess $nvmfpid
	fi
	if [ "$TEST_MODE" == "iso" ]; then
		$rootdir/scripts/setup.sh reset
		if [ "$TEST_TRANSPORT" == "rdma" ]; then
			rdma_device_init
		elif [ "$TEST_TRANSPORT" == "tcp" ]; then
			nvmf_veth_fini
		fi
	fi
}
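# Typical flow in a test script that sources this file (illustrative; the core
# mask given to nvmfappstart is just an example):
#   nvmftestinit
#   nvmfappstart -m 0xF
#   ... test body ...
#   trap - SIGINT SIGTERM EXIT
#   nvmftestfini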

function rdma_device_init() {
	load_ib_rdma_modules
	detect_rdma_nics
	allocate_nic_ips
}

function revert_soft_roce() {
	rxe_cfg stop
}

function check_ip_is_soft_roce() {
	if [ "$TEST_TRANSPORT" != "rdma" ]; then
		return 0
	fi
	rxe_cfg status rxe | grep -wq "$1"
}
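# Illustrative use: branch on whether the target IP belongs to a soft-RoCE
# (rxe) device, e.g.:
#   if check_ip_is_soft_roce "$NVMF_FIRST_TARGET_IP"; then
#       echo "Using software RDMA, skipping this step"
#   fi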

function nvme_connect() {
	local init_count
	init_count=$(nvme list | wc -l)

	# Propagate the exit status if the connect itself fails.
	nvme connect "$@" || return $?

	# Wait for the new controller to show up in "nvme list".
	for i in $(seq 1 10); do
		if [ $(nvme list | wc -l) -gt $init_count ]; then
			return 0
		else
			sleep 1s
		fi
	done
	return 1
}
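# Illustrative use (standard nvme-cli connect flags; the NQN is an example):
#   nvme_connect -t "$TEST_TRANSPORT" -n "nqn.2016-06.io.spdk:cnode1" \
#       -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"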

function get_nvme_devs() {
	local dev rest

	nvmes=()
	while read -r dev rest; do
		if [[ $dev == /dev/nvme* ]]; then
			nvmes+=("$dev")
		fi
		if [[ $1 == print ]]; then
			echo "$dev $rest"
		fi
	done < <(nvme list)
	((${#nvmes[@]})) || return 1
	echo "${#nvmes[@]}" >&2
}
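# Illustrative use: populate the global "nvmes" array and echo the raw
# "nvme list" rows while doing so:
#   get_nvme_devs print
#   echo "found ${#nvmes[@]} nvme device(s)"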

function gen_nvmf_target_json() {
	local subsystem config=()

	for subsystem in "${@:-1}"; do
		config+=(
			"$(
				cat <<- EOF
					{
					  "params": {
					    "name": "Nvme$subsystem",
					    "trtype": "$TEST_TRANSPORT",
					    "traddr": "$NVMF_FIRST_TARGET_IP",
					    "adrfam": "ipv4",
					    "trsvcid": "$NVMF_PORT",
					    "subnqn": "nqn.2016-06.io.spdk:cnode$subsystem",
					    "hostnqn": "nqn.2016-06.io.spdk:host$subsystem"
					  },
					  "method": "bdev_nvme_attach_controller"
					}
				EOF
			)"
		)
	done
	jq . <<- JSON
		{
		  "subsystems": [
		    {
		      "subsystem": "bdev",
		      "config": [
			{
			  "method": "bdev_nvme_set_options",
			  "params": {
				"action_on_timeout": "none",
				"timeout_us": 0,
				"retry_count": 4,
				"arbitration_burst": 0,
				"low_priority_weight": 0,
				"medium_priority_weight": 0,
				"high_priority_weight": 0,
				"nvme_adminq_poll_period_us": 10000,
				"keep_alive_timeout_ms": 10000,
				"nvme_ioq_poll_period_us": 0,
				"io_queue_requests": 0,
				"delay_cmd_submit": true
			  }
			},
		        $(
		IFS=","
		printf '%s\n' "${config[*]}"
		)
		      ]
		    }
		  ]
		}
	JSON
}
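# Illustrative use: generate a bdev config that attaches Nvme1 and Nvme2 and
# feed it to an SPDK app that accepts a JSON config (bdevperf shown here as an
# example; the path and I/O parameters are illustrative):
#   gen_nvmf_target_json 1 2 > "$testdir/bdev.json"
#   $rootdir/test/bdev/bdevperf/bdevperf --json "$testdir/bdev.json" -q 128 -o 4096 -w verify -t 10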