xref: /dpdk/drivers/vdpa/mlx5/mlx5_vdpa_steer.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <netinet/in.h>
5 
6 #include <rte_malloc.h>
7 #include <rte_errno.h>
8 #include <rte_common.h>
9 
10 #include <mlx5_common.h>
11 
12 #include "mlx5_vdpa_utils.h"
13 #include "mlx5_vdpa.h"
14 
15 static void
16 mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv)
17 {
18 	unsigned i;
19 
20 	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
21 		if (priv->steer.rss[i].flow) {
22 			claim_zero(mlx5_glue->dv_destroy_flow
23 						     (priv->steer.rss[i].flow));
24 			priv->steer.rss[i].flow = NULL;
25 		}
26 		if (priv->steer.rss[i].tir_action) {
27 			claim_zero(mlx5_glue->destroy_flow_action
28 					       (priv->steer.rss[i].tir_action));
29 			priv->steer.rss[i].tir_action = NULL;
30 		}
31 		if (priv->steer.rss[i].tir) {
32 			claim_zero(mlx5_devx_cmd_destroy
33 						      (priv->steer.rss[i].tir));
34 			priv->steer.rss[i].tir = NULL;
35 		}
36 		if (priv->steer.rss[i].matcher) {
37 			claim_zero(mlx5_glue->dv_destroy_flow_matcher
38 						  (priv->steer.rss[i].matcher));
39 			priv->steer.rss[i].matcher = NULL;
40 		}
41 	}
42 }
43 
44 void
45 mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
46 {
47 	mlx5_vdpa_rss_flows_destroy(priv);
48 	if (priv->steer.tbl) {
49 		claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
50 		priv->steer.tbl = NULL;
51 	}
52 	if (priv->steer.domain) {
53 		claim_zero(mlx5_glue->dr_destroy_domain(priv->steer.domain));
54 		priv->steer.domain = NULL;
55 	}
56 	if (priv->steer.rqt) {
57 		claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
58 		priv->steer.rqt = NULL;
59 	}
60 }
61 
62 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
63 /*
64  * Return the number of queues configured to the table on success, otherwise
65  * -1 on error.
66  */
67 static int
68 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
69 {
70 	int i;
71 	uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
72 				 1 << priv->log_max_rqt_size);
73 	struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
74 						      + rqt_n *
75 						      sizeof(uint32_t), 0);
76 	uint32_t k = 0, j;
77 	int ret = 0, num;
78 
79 	if (!attr) {
80 		DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
81 		rte_errno = ENOMEM;
82 		return -ENOMEM;
83 	}
84 	for (i = 0; i < priv->nr_virtqs; i++) {
85 		if (is_virtq_recvq(i, priv->nr_virtqs) &&
86 		    priv->virtqs[i].enable && priv->virtqs[i].virtq) {
87 			attr->rq_list[k] = priv->virtqs[i].virtq->id;
88 			k++;
89 		}
90 	}
91 	if (k == 0)
92 		/* No enabled RQ to configure for RSS. */
93 		return 0;
94 	num = (int)k;
95 	for (j = 0; k != rqt_n; ++k, ++j)
96 		attr->rq_list[k] = attr->rq_list[j];
97 	attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
98 	attr->rqt_max_size = rqt_n;
99 	attr->rqt_actual_size = rqt_n;
100 	if (!priv->steer.rqt) {
101 		priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->ctx, attr);
102 		if (!priv->steer.rqt) {
103 			DRV_LOG(ERR, "Failed to create RQT.");
104 			ret = -rte_errno;
105 		}
106 	} else {
107 		ret = mlx5_devx_cmd_modify_rqt(priv->steer.rqt, attr);
108 		if (ret)
109 			DRV_LOG(ERR, "Failed to modify RQT.");
110 	}
111 	rte_free(attr);
112 	return ret ? -1 : num;
113 }
114 
/*
 * Create the RSS steering objects: for each row of the vars[] table below,
 * build a flow matcher, a TIR hashing into the shared RQT, a
 * destination-TIR action, and a flow rule on priv->steer.tbl combining
 * them.
 *
 * Returns 0 on success and -1 on error; on error, already-created
 * resources are intentionally left for the caller to release (see the
 * error label). Returns -ENOTSUP when built without mlx5dv direct rules
 * support.
 */
static int __rte_unused
mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	/*
	 * Template TIR attributes shared by all entries; the L3/L4 protocol
	 * types and hash field selection are patched per entry in the loop
	 * below.
	 */
	struct mlx5_devx_tir_attr tir_att = {
		.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
		.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ,
		.transport_domain = priv->td->id,
		.indirect_table = priv->steer.rqt->id,
		.rx_hash_symmetric = 1,
		/* Fixed Toeplitz RSS hash key. */
		.rx_hash_toeplitz_key = { 0x2c, 0xc6, 0x81, 0xd1,
					  0x5b, 0xdb, 0xf4, 0xf7,
					  0xfc, 0xa2, 0x83, 0x19,
					  0xdb, 0x1a, 0x3e, 0x94,
					  0x6b, 0x9e, 0x38, 0xd9,
					  0x2c, 0x9c, 0x03, 0xd1,
					  0xad, 0x99, 0x44, 0xa7,
					  0xd9, 0x56, 0x3d, 0x59,
					  0x06, 0x3c, 0x25, 0xf3,
					  0xfc, 0x1f, 0xdc, 0x2a },
	};
	/*
	 * Matcher mask/value layout expected by mlx5dv: a size field
	 * immediately followed by the match parameter buffer. The size
	 * excludes the trailing misc4 section of fte_match_param.
	 */
	struct {
		size_t size;
		/**< Size of match value. Do NOT split size and key! */
		uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
		/**< Matcher value. This value is used as the mask or a key. */
	} matcher_mask = {
				.size = sizeof(matcher_mask.buf) -
					MLX5_ST_SZ_BYTES(fte_match_set_misc4),
			},
	  matcher_value = {
				.size = sizeof(matcher_value.buf) -
					MLX5_ST_SZ_BYTES(fte_match_set_misc4),
			};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher_mask,
	};
	void *match_m = matcher_mask.buf;
	void *match_v = matcher_value.buf;
	/* Pointers into the outer-headers sections of mask and value. */
	void *headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
	void *actions[1];
	/* Hash on source and destination IP addresses. */
	const uint8_t l3_hash =
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP);
	/* Hash on source and destination L4 ports. */
	const uint8_t l4_hash =
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
	/* Column indexes into the vars[] rows below. */
	enum { PRIO, CRITERIA, IP_VER_M, IP_VER_V, IP_PROT_M, IP_PROT_V, L3_BIT,
	       L4_BIT, HASH, END};
	/*
	 * One row per RSS entry: matcher priority, match criteria enable
	 * bits, IP version mask/value, IP protocol mask/value, TIR L3/L4
	 * protocol types and hash field selection. Row 0 matches nothing
	 * specific (catch-all, no hash); more specific matches use
	 * numerically lower priorities - NOTE(review): assumed lower value
	 * means higher precedence, confirm against mlx5dv matcher
	 * semantics.
	 */
	const uint8_t vars[RTE_DIM(priv->steer.rss)][END] = {
		{ 7, 0, 0, 0, 0, 0, 0, 0, 0 },
		{ 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0, 0,
		 MLX5_L3_PROT_TYPE_IPV4, 0, l3_hash },
		{ 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0, 0,
		 MLX5_L3_PROT_TYPE_IPV6, 0, l3_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
		 IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_UDP,
		 l3_hash | l4_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
		 IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_TCP,
		 l3_hash | l4_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
		 IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_UDP,
		 l3_hash | l4_hash },
		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
		 IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_TCP,
		 l3_hash | l4_hash },
	};
	unsigned i;

	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
		/* Patch the per-entry match fields and hash selection. */
		dv_attr.priority = vars[i][PRIO];
		dv_attr.match_criteria_enable = vars[i][CRITERIA];
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
			 vars[i][IP_VER_M]);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 vars[i][IP_VER_V]);
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
			 vars[i][IP_PROT_M]);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 vars[i][IP_PROT_V]);
		tir_att.rx_hash_field_selector_outer.l3_prot_type =
								vars[i][L3_BIT];
		tir_att.rx_hash_field_selector_outer.l4_prot_type =
								vars[i][L4_BIT];
		tir_att.rx_hash_field_selector_outer.selected_fields =
								  vars[i][HASH];
		/* Matcher, then TIR, then TIR action, then the flow rule. */
		priv->steer.rss[i].matcher = mlx5_glue->dv_create_flow_matcher
					 (priv->ctx, &dv_attr, priv->steer.tbl);
		if (!priv->steer.rss[i].matcher) {
			DRV_LOG(ERR, "Failed to create matcher %d.", i);
			goto error;
		}
		priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(priv->ctx,
								  &tir_att);
		if (!priv->steer.rss[i].tir) {
			DRV_LOG(ERR, "Failed to create TIR %d.", i);
			goto error;
		}
		priv->steer.rss[i].tir_action =
				mlx5_glue->dv_create_flow_action_dest_devx_tir
						  (priv->steer.rss[i].tir->obj);
		if (!priv->steer.rss[i].tir_action) {
			DRV_LOG(ERR, "Failed to create TIR action %d.", i);
			goto error;
		}
		actions[0] = priv->steer.rss[i].tir_action;
		priv->steer.rss[i].flow = mlx5_glue->dv_create_flow
					(priv->steer.rss[i].matcher,
					 (void *)&matcher_value, 1, actions);
		if (!priv->steer.rss[i].flow) {
			DRV_LOG(ERR, "Failed to create flow %d.", i);
			goto error;
		}
	}
	return 0;
error:
	/* Resources will be freed by the caller. */
	return -1;
#else
	(void)priv;
	return -ENOTSUP;
#endif /* HAVE_MLX5DV_DR */
}
241 
242 int
243 mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
244 {
245 	int ret = mlx5_vdpa_rqt_prepare(priv);
246 
247 	if (ret == 0) {
248 		mlx5_vdpa_rss_flows_destroy(priv);
249 		if (priv->steer.rqt) {
250 			claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
251 			priv->steer.rqt = NULL;
252 		}
253 	} else if (ret < 0) {
254 		return ret;
255 	} else if (!priv->steer.rss[0].flow) {
256 		ret = mlx5_vdpa_rss_flows_create(priv);
257 		if (ret) {
258 			DRV_LOG(ERR, "Cannot create RSS flows.");
259 			return -1;
260 		}
261 	}
262 	return 0;
263 }
264 
265 int
266 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
267 {
268 #ifdef HAVE_MLX5DV_DR
269 	priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
270 						  MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
271 	if (!priv->steer.domain) {
272 		DRV_LOG(ERR, "Failed to create Rx domain.");
273 		goto error;
274 	}
275 	priv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);
276 	if (!priv->steer.tbl) {
277 		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
278 		goto error;
279 	}
280 	if (mlx5_vdpa_steer_update(priv))
281 		goto error;
282 	return 0;
283 error:
284 	mlx5_vdpa_steer_unset(priv);
285 	return -1;
286 #else
287 	(void)priv;
288 	return -ENOTSUP;
289 #endif /* HAVE_MLX5DV_DR */
290 }
291