#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2021 Intel Corporation
#  All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh

gather_supported_nvmf_pci_devs
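# net_devs[] is populated by gather_supported_nvmf_pci_devs above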
TCP_INTERFACE_LIST=("${net_devs[@]}")
if ((${#TCP_INTERFACE_LIST[@]} == 0)); then
	echo "ERROR: Physical TCP interfaces are not ready"
	exit 1
fi

perf="$SPDK_BIN_DIR/spdk_nvme_perf"
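# spdk_nvme_perf is used as the initiator for both test runs below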

function adq_configure_driver() {
	# Enable adding flows to hardware
	"${NVMF_TARGET_NS_CMD[@]}" ethtool --offload $NVMF_TARGET_INTERFACE hw-tc-offload on
	# The ADQ driver turns this switch on by default; it has to be turned off for SPDK testing
	"${NVMF_TARGET_NS_CMD[@]}" ethtool --set-priv-flags $NVMF_TARGET_INTERFACE channel-pkt-inspect-optimize off
	# Since sockets are non-blocking, a non-zero value of net.core.busy_read is sufficient
	sysctl -w net.core.busy_poll=1
	sysctl -w net.core.busy_read=1

	tc=/usr/sbin/tc
	# Create 2 traffic classes; TC1 gets the 2 queues (2@2) that will carry the ADQ traffic
	"${NVMF_TARGET_NS_CMD[@]}" $tc qdisc add dev $NVMF_TARGET_INTERFACE root \
		mqprio num_tc 2 map 0 1 queues 2@0 2@2 hw 1 mode channel
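	# The ingress qdisc provides the ffff: parent handle that the flower filter below attaches to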
	"${NVMF_TARGET_NS_CMD[@]}" $tc qdisc add dev $NVMF_TARGET_INTERFACE ingress
	# The TC filter uses the target address (traddr) and port number (trsvcid) to steer packets to TC1
	"${NVMF_TARGET_NS_CMD[@]}" $tc filter add dev $NVMF_TARGET_INTERFACE protocol \
		ip parent ffff: prio 1 flower dst_ip $NVMF_FIRST_TARGET_IP/32 ip_proto tcp dst_port $NVMF_PORT skip_sw hw_tc 1
	# Set up the mechanism for Tx queue selection based on the Rx queue(s) map
	"${NVMF_TARGET_NS_CMD[@]}" $rootdir/scripts/perf/nvmf/set_xps_rxqs $NVMF_TARGET_INTERFACE
}

function adq_configure_nvmf_target() {
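	# $1 selects the sock priority / placement-id mode: 0 for the baseline (ADQ disabled) run, 1 when ADQ is enabled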
	socket_impl=$("$rpc_py" sock_get_default_impl | jq -r '.impl_name')
	$rpc_py sock_impl_set_options --enable-placement-id $1 --enable-zerocopy-send-server -i $socket_impl
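	# The target was started with --wait-for-rpc, so subsystem initialization has to be triggered explicitly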
	$rpc_py framework_start_init
	$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS --io-unit-size 8192 --sock-priority $1
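	# Expose a 64 MB malloc bdev (512-byte blocks) as a namespace of cnode1 and listen on the test IP/port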
	$rpc_py bdev_malloc_create 64 512 -b Malloc1
	$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Malloc1
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
}

function adq_reload_driver() {
	# Load sch_* modules by their canonical names in case they are blacklisted.
	# Kernels >= 6.9 call request_module() with the module's alias, so when the
	# request is passed down to modprobe it simply won't load the module if a
	# blacklist entry is in place. Add whatever other NET_* modules are needed
	# below.
	modprobe -a \
		sch_mqprio

	rmmod ice
	modprobe ice
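	# Give the reloaded driver a moment to bring the interfaces back up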
	sleep 5
}

# Clear any previous configuration that may have an impact.
# At present, ADQ configuration is only applicable to the ice driver.
adq_reload_driver

# We are going to run the test twice, once with ADQ disabled and once with it enabled.
# The nvmf target is given 4 cores, and ADQ creates its queues in one traffic class. We then run
# perf with 4 cores (i.e. 4 connections) and examine how the connections are allocated to the
# nvmf target's poll groups.

# When ADQ is disabled, we expect 1 connection on each of the 4 poll groups.
nvmftestinit
nvmfappstart -m 0xF --wait-for-rpc
adq_configure_nvmf_target 0
$perf -q 64 -o 4096 -w randread -t 10 -c 0xF0 \
	-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
	subnqn:nqn.2016-06.io.spdk:cnode1" &
perfpid=$!
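# Give perf a couple of seconds to establish its IO qpairs before sampling the target's statistics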
sleep 2

nvmf_stats=$("$rpc_py" nvmf_get_stats)
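# Count the poll groups that own exactly one IO qpair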
count=$(jq -r '.poll_groups[] | select(.current_io_qpairs == 1) | length' <<< $nvmf_stats | wc -l)
if [[ "$count" -ne 4 ]]; then
	echo "ERROR: With ADQ disabled, connections were not evenly distributed amongst poll groups!"
	exit 1
fi
wait $perfpid
nvmftestfini

adq_reload_driver

# When ADQ is enabled, we expect the connections to reside on AT MOST two poll groups.
nvmftestinit
adq_configure_driver
nvmfappstart -m 0xF --wait-for-rpc
adq_configure_nvmf_target 1
$perf -q 64 -o 4096 -w randread -t 10 -c 0xF0 \
	-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
	subnqn:nqn.2016-06.io.spdk:cnode1" &
perfpid=$!
sleep 2

nvmf_stats=$("$rpc_py" nvmf_get_stats)
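# Count the poll groups that have no IO qpairs at all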
count=$(jq -r '.poll_groups[] | select(.current_io_qpairs == 0) | length' <<< $nvmf_stats | wc -l)
if [[ "$count" -lt 2 ]]; then
	echo "ERROR: With ADQ enabled, did not find 0 connections on at least 2 of the poll groups!"
	exit 1
fi

wait $perfpid
nvmftestfini

trap - SIGINT SIGTERM EXIT