xref: /dpdk/drivers/vdpa/mlx5/mlx5_vdpa_steer.c (revision 91edbbfbb4b16c64437a340e9fb17912cf294207)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <netinet/in.h>
5 
6 #include <rte_malloc.h>
7 #include <rte_errno.h>
8 #include <rte_common.h>
9 
10 #include <mlx5_common.h>
11 
12 #include "mlx5_vdpa_utils.h"
13 #include "mlx5_vdpa.h"
14 
15 static void
mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv * priv)16 mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv)
17 {
18 	unsigned i;
19 
20 	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
21 		if (priv->steer.rss[i].flow) {
22 			claim_zero(mlx5_glue->dv_destroy_flow
23 						     (priv->steer.rss[i].flow));
24 			priv->steer.rss[i].flow = NULL;
25 		}
26 		if (priv->steer.rss[i].tir_action) {
27 			claim_zero(mlx5_glue->destroy_flow_action
28 					       (priv->steer.rss[i].tir_action));
29 			priv->steer.rss[i].tir_action = NULL;
30 		}
31 		if (priv->steer.rss[i].tir) {
32 			claim_zero(mlx5_devx_cmd_destroy
33 						      (priv->steer.rss[i].tir));
34 			priv->steer.rss[i].tir = NULL;
35 		}
36 		if (priv->steer.rss[i].matcher) {
37 			claim_zero(mlx5_glue->dv_destroy_flow_matcher
38 						  (priv->steer.rss[i].matcher));
39 			priv->steer.rss[i].matcher = NULL;
40 		}
41 	}
42 }
43 
44 void
mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv * priv)45 mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
46 {
47 	mlx5_vdpa_rss_flows_destroy(priv);
48 	if (priv->steer.rqt) {
49 		claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
50 		priv->steer.rqt = NULL;
51 	}
52 }
53 
54 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
55 /*
56  * Return the number of queues configured to the table on success, otherwise
57  * -1 on error.
58  */
59 static int
mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv * priv,bool is_dummy)60 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv, bool is_dummy)
61 {
62 	int i;
63 	uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
64 				 1 << priv->log_max_rqt_size);
65 	struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
66 						      + rqt_n *
67 						      sizeof(uint32_t), 0);
68 	uint32_t k = 0, j;
69 	int ret = 0, num;
70 	uint16_t nr_vring = is_dummy ?
71 	(((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?
72 	(priv->queues * 2) : priv->caps.max_num_virtio_queues) : priv->nr_virtqs;
73 
74 	if (!attr) {
75 		DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
76 		rte_errno = ENOMEM;
77 		return -ENOMEM;
78 	}
79 	for (i = 0; i < nr_vring; i++) {
80 		if (is_virtq_recvq(i, priv->nr_virtqs) &&
81 			(is_dummy || (priv->virtqs[i].enable &&
82 			priv->virtqs[i].configured)) &&
83 			priv->virtqs[i].virtq) {
84 			attr->rq_list[k] = priv->virtqs[i].virtq->id;
85 			k++;
86 		}
87 	}
88 	if (k == 0)
89 		/* No enabled RQ to configure for RSS. */
90 		return 0;
91 	num = (int)k;
92 	for (j = 0; k != rqt_n; ++k, ++j)
93 		attr->rq_list[k] = attr->rq_list[j];
94 	attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
95 	attr->rqt_max_size = rqt_n;
96 	attr->rqt_actual_size = rqt_n;
97 	if (!priv->steer.rqt) {
98 		priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->cdev->ctx,
99 							   attr);
100 		if (!priv->steer.rqt) {
101 			DRV_LOG(ERR, "Failed to create RQT.");
102 			ret = -rte_errno;
103 		}
104 	} else {
105 		ret = mlx5_devx_cmd_modify_rqt(priv->steer.rqt, attr);
106 		if (ret)
107 			DRV_LOG(ERR, "Failed to modify RQT.");
108 	}
109 	rte_free(attr);
110 	return ret ? -1 : num;
111 }
112 
/*
 * Create the full set of RSS steering rules: for each traffic class
 * (catch-all, IPv4, IPv6, IPv4/UDP, IPv4/TCP, IPv6/UDP, IPv6/TCP) build
 * a matcher, a TIR hashing over the shared RQT, a TIR destination action
 * and the flow tying them together.
 *
 * Returns 0 on success, -1 on error (partially created resources are
 * left for the caller to free via mlx5_vdpa_steer_unset()), or -ENOTSUP
 * when built without mlx5dv DR support.
 */
static int __rte_unused
mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_devx_tir_attr tir_att = {
		.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
		.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ,
		.transport_domain = priv->td->id,
		.indirect_table = priv->steer.rqt->id,
		.rx_hash_symmetric = 1,
		/*
		 * Fixed Toeplitz hash key; with rx_hash_symmetric set, both
		 * flow directions presumably hash to the same queue - TODO
		 * confirm against mlx5 PRM.
		 */
		.rx_hash_toeplitz_key = { 0x2c, 0xc6, 0x81, 0xd1,
					  0x5b, 0xdb, 0xf4, 0xf7,
					  0xfc, 0xa2, 0x83, 0x19,
					  0xdb, 0x1a, 0x3e, 0x94,
					  0x6b, 0x9e, 0x38, 0xd9,
					  0x2c, 0x9c, 0x03, 0xd1,
					  0xad, 0x99, 0x44, 0xa7,
					  0xd9, 0x56, 0x3d, 0x59,
					  0x06, 0x3c, 0x25, 0xf3,
					  0xfc, 0x1f, 0xdc, 0x2a },
	};
	struct {
		size_t size;
		/**< Size of match value. Do NOT split size and key! */
		uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
		/**< Matcher value. This value is used as the mask or a key. */
	} matcher_mask = {
				/* Trailing misc4/misc5 segments are unused. */
				.size = sizeof(matcher_mask.buf) -
					MLX5_ST_SZ_BYTES(fte_match_set_misc4) -
					MLX5_ST_SZ_BYTES(fte_match_set_misc5),
			},
	  matcher_value = {
				.size = sizeof(matcher_value.buf) -
					MLX5_ST_SZ_BYTES(fte_match_set_misc4) -
					MLX5_ST_SZ_BYTES(fte_match_set_misc5),
			};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher_mask,
	};
	void *match_m = matcher_mask.buf;
	void *match_v = matcher_value.buf;
	void *headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
	void *actions[1];
	/* Hash-field selector bits for L3 (src/dst IP) and L4 (ports). */
	const uint8_t l3_hash =
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP);
	const uint8_t l4_hash =
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
	/* Column indices into the vars[][] rows below. */
	enum { PRIO, CRITERIA, IP_VER_M, IP_VER_V, IP_PROT_M, IP_PROT_V, L3_BIT,
	       L4_BIT, HASH, END};
	/*
	 * One row per RSS rule.  Lower priority value wins, so L4 rules
	 * (prio 5) beat L3-only rules (prio 6), which beat the catch-all
	 * row (prio 7, empty match).
	 */
	const uint8_t vars[RTE_DIM(priv->steer.rss)][END] = {
		{ 7, 0, 0, 0, 0, 0, 0, 0, 0 },
		{ 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0, 0,
		 MLX5_L3_PROT_TYPE_IPV4, 0, l3_hash },
		{ 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0, 0,
		 MLX5_L3_PROT_TYPE_IPV6, 0, l3_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
		 IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_UDP,
		 l3_hash | l4_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
		 IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_TCP,
		 l3_hash | l4_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
		 IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_UDP,
		 l3_hash | l4_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
		 IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_TCP,
		 l3_hash | l4_hash },
	};
	unsigned i;

	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
		dv_attr.priority = vars[i][PRIO];
		dv_attr.match_criteria_enable = vars[i][CRITERIA];
		/* Program this row's mask/value into the shared buffers. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
			 vars[i][IP_VER_M]);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 vars[i][IP_VER_V]);
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
			 vars[i][IP_PROT_M]);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 vars[i][IP_PROT_V]);
		tir_att.rx_hash_field_selector_outer.l3_prot_type =
								vars[i][L3_BIT];
		tir_att.rx_hash_field_selector_outer.l4_prot_type =
								vars[i][L4_BIT];
		tir_att.rx_hash_field_selector_outer.selected_fields =
								  vars[i][HASH];
		priv->steer.rss[i].matcher = mlx5_glue->dv_create_flow_matcher
				   (priv->cdev->ctx, &dv_attr, priv->steer.tbl);
		if (!priv->steer.rss[i].matcher) {
			DRV_LOG(ERR, "Failed to create matcher %d.", i);
			goto error;
		}
		priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir
						    (priv->cdev->ctx, &tir_att);
		if (!priv->steer.rss[i].tir) {
			DRV_LOG(ERR, "Failed to create TIR %d.", i);
			goto error;
		}
		priv->steer.rss[i].tir_action =
				mlx5_glue->dv_create_flow_action_dest_devx_tir
						  (priv->steer.rss[i].tir->obj);
		if (!priv->steer.rss[i].tir_action) {
			DRV_LOG(ERR, "Failed to create TIR action %d.", i);
			goto error;
		}
		actions[0] = priv->steer.rss[i].tir_action;
		priv->steer.rss[i].flow = mlx5_glue->dv_create_flow
					(priv->steer.rss[i].matcher,
					 (void *)&matcher_value, 1, actions);
		if (!priv->steer.rss[i].flow) {
			DRV_LOG(ERR, "Failed to create flow %d.", i);
			goto error;
		}
	}
	return 0;
error:
	/* Resources will be freed by the caller. */
	return -1;
#else
	(void)priv;
	return -ENOTSUP;
#endif /* HAVE_MLX5DV_DR */
}
241 
242 int
mlx5_vdpa_steer_update(struct mlx5_vdpa_priv * priv,bool is_dummy)243 mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy)
244 {
245 	int ret;
246 
247 	pthread_mutex_lock(&priv->steer_update_lock);
248 	ret = mlx5_vdpa_rqt_prepare(priv, is_dummy);
249 	if (ret == 0) {
250 		mlx5_vdpa_steer_unset(priv);
251 	} else if (ret < 0) {
252 		pthread_mutex_unlock(&priv->steer_update_lock);
253 		return ret;
254 	} else if (!priv->steer.rss[0].flow) {
255 		ret = mlx5_vdpa_rss_flows_create(priv);
256 		if (ret) {
257 			DRV_LOG(ERR, "Cannot create RSS flows.");
258 			pthread_mutex_unlock(&priv->steer_update_lock);
259 			return -1;
260 		}
261 	}
262 	pthread_mutex_unlock(&priv->steer_update_lock);
263 	return 0;
264 }
265 
266 int
mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv * priv)267 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
268 {
269 	if (mlx5_vdpa_steer_update(priv, false))
270 		goto error;
271 	return 0;
272 error:
273 	mlx5_vdpa_steer_unset(priv);
274 	return -1;
275 }
276