/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <dpaa2_pmd_logs.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>

#include "../dpaa2_ethdev.h"

int
dpaa2_distset_to_dpkg_profile_cfg(
		uint64_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg);

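/*
 * Hash-distribute Rx traffic on traffic class 0 using a raw byte field of
 * the frame (offset/size) instead of the usual protocol header fields.
 *
 * A minimal usage sketch, with illustrative values only; hash on 4 bytes
 * located 16 bytes into the frame:
 *
 *	ret = rte_pmd_dpaa2_set_custom_hash(port_id, 16, 4);
 */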
int
rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
			      uint16_t offset,
			      uint8_t size)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_tc_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_index = 0;

	p_params = rte_zmalloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	/* Zero the profile so no stale stack data reaches the hardware */
	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
	kg_cfg.extracts[0].extract.from_data.offset = offset;
	kg_cfg.extracts[0].extract.from_data.size = size;
	kg_cfg.extracts[0].num_of_byte_masks = 0;
	kg_cfg.num_extracts = 1;

	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
	tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
				  &tc_cfg);
	rte_free(p_params);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Setting distribution for Rx failed with err: %d",
			     ret);
		return ret;
	}

	return 0;
}

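/*
 * Enable hash-based Rx distribution on one traffic class. Queues are
 * assigned to TCs in blocks of priv->dist_queues, so a TC distributes
 * over a full block or over whatever is left of nb_rx_queues. E.g. with
 * nb_rx_queues = 12 and dist_queues = 8, TC0 spreads over 8 queues and
 * TC1 over the remaining 4.
 */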
int
dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
	uint64_t req_dist_set, int tc_index)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_dist_queues;

	/* Each TC distributes over priv->dist_queues queues; the last TC in
	 * priority order gets the remainder when nb_rx_queues is not a
	 * multiple of dist_queues. Compute the dist size for this tc_index:
	 */
	tc_dist_queues = eth_dev->data->nb_rx_queues -
		tc_index * priv->dist_queues;
	if (tc_dist_queues <= 0) {
		DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
		return 0;
	}

	if (tc_dist_queues > priv->dist_queues)
		tc_dist_queues = priv->dist_queues;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));

	ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
			      req_dist_set);
		rte_free(p_params);
		return ret;
	}

	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = tc_dist_queues;
	tc_cfg.enable = true;
	tc_cfg.tc = tc_index;

	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
	rte_free(p_params);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Setting distribution for Rx failed with err: %d",
			     ret);
		return ret;
	}

	return 0;
}

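/*
 * Reset hash distribution on one traffic class: an empty key-generation
 * profile (zero extracts) and a dist size of 0 are pushed to the MC
 * firmware.
 */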
int dpaa2_remove_flow_dist(
	struct rte_eth_dev *eth_dev,
	uint8_t tc_index)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
	tc_cfg.dist_size = 0;
	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.enable = true;
	tc_cfg.tc = tc_index;

	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	kg_cfg.num_extracts = 0;
	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
			&tc_cfg);
	rte_free(p_params);
	if (ret)
		DPAA2_PMD_ERR(
			     "Setting distribution for Rx failed with err: %d",
			     ret);
	return ret;
}

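/*
 * Translate an ETH_RSS_* bit mask into a DPAA2 key-generation profile:
 * walk the mask bit by bit and append one header-field extract per
 * selected field (the l2/l3/l4/sctp/mpls flags make sure each layer is
 * programmed only once). E.g. the IPv4/IPv6 bits add extracts for the IP
 * source, destination and protocol fields.
 */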
int
dpaa2_distset_to_dpkg_profile_cfg(
		uint64_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg)
{
	uint32_t loop = 0, i = 0;
	uint64_t dist_field = 0;
	int l2_configured = 0, l3_configured = 0;
	int l4_configured = 0, sctp_configured = 0;
	int mpls_configured = 0;

	memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	while (req_dist_set) {
		if (req_dist_set % 2 != 0) {
			/* 64-bit shift: RSS flags can sit above bit 31 */
			dist_field = 1ULL << loop;
			switch (dist_field) {
			case ETH_RSS_L2_PAYLOAD:

				if (l2_configured)
					break;
				l2_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_ETH;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_ETH_TYPE;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_MPLS:

				if (mpls_configured)
					break;
				mpls_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_MPLS;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_MPLS_MPLSL_1;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_MPLS;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_MPLS_MPLSL_2;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_MPLS;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_MPLS_MPLSL_N;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_IPV4:
			case ETH_RSS_FRAG_IPV4:
			case ETH_RSS_NONFRAG_IPV4_OTHER:
			case ETH_RSS_IPV6:
			case ETH_RSS_FRAG_IPV6:
			case ETH_RSS_NONFRAG_IPV6_OTHER:
			case ETH_RSS_IPV6_EX:

				if (l3_configured)
					break;
				l3_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_PROTO;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_TCP:
			case ETH_RSS_NONFRAG_IPV6_TCP:
			case ETH_RSS_NONFRAG_IPV4_UDP:
			case ETH_RSS_NONFRAG_IPV6_UDP:
			case ETH_RSS_IPV6_TCP_EX:
			case ETH_RSS_IPV6_UDP_EX:

				if (l4_configured)
					break;
				l4_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_SCTP:
			case ETH_RSS_NONFRAG_IPV6_SCTP:

				if (sctp_configured)
					break;
				sctp_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			default:
				DPAA2_PMD_WARN(
				      "unsupported flow dist option 0x%" PRIx64,
				      dist_field);
				return -EINVAL;
			}
		}
		req_dist_set = req_dist_set >> 1;
		loop++;
	}
	kg_cfg->num_extracts = i;
	return 0;
}

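/*
 * Note on the Rx buffer layout programmed below: the headroom reported to
 * the hardware is the mbuf headroom (RTE_PKTMBUF_HEADROOM, aligned up to
 * DPAA2_PACKET_LAYOUT_ALIGN) minus the private pass-through annotation
 * (DPAA2_FD_PTA_SIZE) and the hardware annotation
 * (DPAA2_MBUF_HW_ANNOTATION) that sit in front of the frame data.
 */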
int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
		     void *blist)
{
	/* Attach a DPNI to a buffer pool list; the buffer pool list handle
	 * is passed in blist.
	 */
	int32_t retcode;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_pools_cfg bpool_cfg;
	struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
	struct dpni_buffer_layout layout;
	int tot_size;

	/* Set the Rx buffer layout; check the alignment first */
	tot_size = RTE_PKTMBUF_HEADROOM;
	tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);

	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;

	layout.pass_timestamp = true;
	layout.pass_frame_status = 1;
	layout.private_data_size = DPAA2_FD_PTA_SIZE;
	layout.pass_parser_result = 1;
	layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
	layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
				DPAA2_MBUF_HW_ANNOTATION;
	retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
					 DPNI_QUEUE_RX, &layout);
	if (retcode) {
		DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
			      retcode);
		return retcode;
	}

	/* Attach the buffer pool to the network interface as described by
	 * the user.
	 */
	memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
	bpool_cfg.num_dpbp = 1;
	bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
	bpool_cfg.pools[0].backup_pool = 0;
	bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
						DPAA2_PACKET_LAYOUT_ALIGN);
	bpool_cfg.pools[0].priority_mask = 0;

	retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
	if (retcode != 0) {
		DPAA2_PMD_ERR("Error configuring buffer pool on interface."
			      " bpid = %d error code = %d",
			      bpool_cfg.pools[0].dpbp_id, retcode);
		return retcode;
	}

	priv->bp_list = bp_list;
	return 0;
}