xref: /dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c (revision 9e991f217fc8719e38a812dc280dba5f84db9f59)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2019 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <rte_dev.h>
19 
20 #include <dpaa2_pmd_logs.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_mempool.h>
23 
24 #include "../dpaa2_ethdev.h"
25 
/* Forward declaration: translates an ETH_RSS_* request bitmask into a
 * DPKG key-generation profile (implementation later in this file).
 */
int
dpaa2_distset_to_dpkg_profile_cfg(
		uint64_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg);
30 
31 int
32 rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
33 			      uint16_t offset,
34 			      uint8_t size)
35 {
36 	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
37 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
38 	struct fsl_mc_io *dpni = priv->hw;
39 	struct dpni_rx_tc_dist_cfg tc_cfg;
40 	struct dpkg_profile_cfg kg_cfg;
41 	void *p_params;
42 	int ret, tc_index = 0;
43 
44 	p_params = rte_zmalloc(
45 		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
46 	if (!p_params) {
47 		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
48 		return -ENOMEM;
49 	}
50 
51 	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
52 	kg_cfg.extracts[0].extract.from_data.offset = offset;
53 	kg_cfg.extracts[0].extract.from_data.size = size;
54 	kg_cfg.extracts[0].num_of_byte_masks = 0;
55 	kg_cfg.num_extracts = 1;
56 
57 	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
58 	if (ret) {
59 		DPAA2_PMD_ERR("Unable to prepare extract parameters");
60 		rte_free(p_params);
61 		return ret;
62 	}
63 
64 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
65 	tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
66 	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
67 	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
68 
69 	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
70 				  &tc_cfg);
71 	rte_free(p_params);
72 	if (ret) {
73 		DPAA2_PMD_ERR(
74 			     "Setting distribution for Rx failed with err: %d",
75 			     ret);
76 		return ret;
77 	}
78 
79 	return 0;
80 }
81 
82 int
83 dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
84 		      uint64_t req_dist_set)
85 {
86 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
87 	struct fsl_mc_io *dpni = priv->hw;
88 	struct dpni_rx_tc_dist_cfg tc_cfg;
89 	struct dpkg_profile_cfg kg_cfg;
90 	void *p_params;
91 	int ret, tc_index = 0;
92 
93 	p_params = rte_malloc(
94 		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
95 	if (!p_params) {
96 		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
97 		return -ENOMEM;
98 	}
99 	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
100 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
101 
102 	ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
103 	if (ret) {
104 		DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
105 			      req_dist_set);
106 		rte_free(p_params);
107 		return ret;
108 	}
109 	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
110 	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
111 	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
112 
113 	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
114 	if (ret) {
115 		DPAA2_PMD_ERR("Unable to prepare extract parameters");
116 		rte_free(p_params);
117 		return ret;
118 	}
119 
120 	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
121 				  &tc_cfg);
122 	rte_free(p_params);
123 	if (ret) {
124 		DPAA2_PMD_ERR(
125 			     "Setting distribution for Rx failed with err: %d",
126 			     ret);
127 		return ret;
128 	}
129 
130 	return 0;
131 }
132 
133 int dpaa2_remove_flow_dist(
134 	struct rte_eth_dev *eth_dev,
135 	uint8_t tc_index)
136 {
137 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
138 	struct fsl_mc_io *dpni = priv->hw;
139 	struct dpni_rx_tc_dist_cfg tc_cfg;
140 	struct dpkg_profile_cfg kg_cfg;
141 	void *p_params;
142 	int ret;
143 
144 	p_params = rte_malloc(
145 		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
146 	if (!p_params) {
147 		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
148 		return -ENOMEM;
149 	}
150 	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
151 	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
152 	kg_cfg.num_extracts = 0;
153 	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
154 	tc_cfg.dist_size = 0;
155 	tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
156 
157 	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
158 	if (ret) {
159 		DPAA2_PMD_ERR("Unable to prepare extract parameters");
160 		rte_free(p_params);
161 		return ret;
162 	}
163 
164 	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
165 				  &tc_cfg);
166 	rte_free(p_params);
167 	if (ret)
168 		DPAA2_PMD_ERR(
169 			     "Setting distribution for Rx failed with err: %d",
170 			     ret);
171 	return ret;
172 }
173 
174 int
175 dpaa2_distset_to_dpkg_profile_cfg(
176 		uint64_t req_dist_set,
177 		struct dpkg_profile_cfg *kg_cfg)
178 {
179 	uint32_t loop = 0, i = 0, dist_field = 0;
180 	int l2_configured = 0, l3_configured = 0;
181 	int l4_configured = 0, sctp_configured = 0;
182 
183 	memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
184 	while (req_dist_set) {
185 		if (req_dist_set % 2 != 0) {
186 			dist_field = 1U << loop;
187 			switch (dist_field) {
188 			case ETH_RSS_L2_PAYLOAD:
189 
190 				if (l2_configured)
191 					break;
192 				l2_configured = 1;
193 
194 				kg_cfg->extracts[i].extract.from_hdr.prot =
195 					NET_PROT_ETH;
196 				kg_cfg->extracts[i].extract.from_hdr.field =
197 					NH_FLD_ETH_TYPE;
198 				kg_cfg->extracts[i].type =
199 					DPKG_EXTRACT_FROM_HDR;
200 				kg_cfg->extracts[i].extract.from_hdr.type =
201 					DPKG_FULL_FIELD;
202 				i++;
203 			break;
204 
205 			case ETH_RSS_IPV4:
206 			case ETH_RSS_FRAG_IPV4:
207 			case ETH_RSS_NONFRAG_IPV4_OTHER:
208 			case ETH_RSS_IPV6:
209 			case ETH_RSS_FRAG_IPV6:
210 			case ETH_RSS_NONFRAG_IPV6_OTHER:
211 			case ETH_RSS_IPV6_EX:
212 
213 				if (l3_configured)
214 					break;
215 				l3_configured = 1;
216 
217 				kg_cfg->extracts[i].extract.from_hdr.prot =
218 					NET_PROT_IP;
219 				kg_cfg->extracts[i].extract.from_hdr.field =
220 					NH_FLD_IP_SRC;
221 				kg_cfg->extracts[i].type =
222 					DPKG_EXTRACT_FROM_HDR;
223 				kg_cfg->extracts[i].extract.from_hdr.type =
224 					DPKG_FULL_FIELD;
225 				i++;
226 
227 				kg_cfg->extracts[i].extract.from_hdr.prot =
228 					NET_PROT_IP;
229 				kg_cfg->extracts[i].extract.from_hdr.field =
230 					NH_FLD_IP_DST;
231 				kg_cfg->extracts[i].type =
232 					DPKG_EXTRACT_FROM_HDR;
233 				kg_cfg->extracts[i].extract.from_hdr.type =
234 					DPKG_FULL_FIELD;
235 				i++;
236 
237 				kg_cfg->extracts[i].extract.from_hdr.prot =
238 					NET_PROT_IP;
239 				kg_cfg->extracts[i].extract.from_hdr.field =
240 					NH_FLD_IP_PROTO;
241 				kg_cfg->extracts[i].type =
242 					DPKG_EXTRACT_FROM_HDR;
243 				kg_cfg->extracts[i].extract.from_hdr.type =
244 					DPKG_FULL_FIELD;
245 				kg_cfg->num_extracts++;
246 				i++;
247 			break;
248 
249 			case ETH_RSS_NONFRAG_IPV4_TCP:
250 			case ETH_RSS_NONFRAG_IPV6_TCP:
251 			case ETH_RSS_NONFRAG_IPV4_UDP:
252 			case ETH_RSS_NONFRAG_IPV6_UDP:
253 			case ETH_RSS_IPV6_TCP_EX:
254 			case ETH_RSS_IPV6_UDP_EX:
255 
256 				if (l4_configured)
257 					break;
258 				l4_configured = 1;
259 
260 				kg_cfg->extracts[i].extract.from_hdr.prot =
261 					NET_PROT_TCP;
262 				kg_cfg->extracts[i].extract.from_hdr.field =
263 					NH_FLD_TCP_PORT_SRC;
264 				kg_cfg->extracts[i].type =
265 					DPKG_EXTRACT_FROM_HDR;
266 				kg_cfg->extracts[i].extract.from_hdr.type =
267 					DPKG_FULL_FIELD;
268 				i++;
269 
270 				kg_cfg->extracts[i].extract.from_hdr.prot =
271 					NET_PROT_TCP;
272 				kg_cfg->extracts[i].extract.from_hdr.field =
273 					NH_FLD_TCP_PORT_SRC;
274 				kg_cfg->extracts[i].type =
275 					DPKG_EXTRACT_FROM_HDR;
276 				kg_cfg->extracts[i].extract.from_hdr.type =
277 					DPKG_FULL_FIELD;
278 				i++;
279 				break;
280 
281 			case ETH_RSS_NONFRAG_IPV4_SCTP:
282 			case ETH_RSS_NONFRAG_IPV6_SCTP:
283 
284 				if (sctp_configured)
285 					break;
286 				sctp_configured = 1;
287 
288 				kg_cfg->extracts[i].extract.from_hdr.prot =
289 					NET_PROT_SCTP;
290 				kg_cfg->extracts[i].extract.from_hdr.field =
291 					NH_FLD_SCTP_PORT_SRC;
292 				kg_cfg->extracts[i].type =
293 					DPKG_EXTRACT_FROM_HDR;
294 				kg_cfg->extracts[i].extract.from_hdr.type =
295 					DPKG_FULL_FIELD;
296 				i++;
297 
298 				kg_cfg->extracts[i].extract.from_hdr.prot =
299 					NET_PROT_SCTP;
300 				kg_cfg->extracts[i].extract.from_hdr.field =
301 					NH_FLD_SCTP_PORT_DST;
302 				kg_cfg->extracts[i].type =
303 					DPKG_EXTRACT_FROM_HDR;
304 				kg_cfg->extracts[i].extract.from_hdr.type =
305 					DPKG_FULL_FIELD;
306 				i++;
307 				break;
308 
309 			default:
310 				DPAA2_PMD_WARN(
311 					     "Unsupported flow dist option %x",
312 					     dist_field);
313 				return -EINVAL;
314 			}
315 		}
316 		req_dist_set = req_dist_set >> 1;
317 		loop++;
318 	}
319 	kg_cfg->num_extracts = i;
320 	return 0;
321 }
322 
323 int
324 dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
325 		     void *blist)
326 {
327 	/* Function to attach a DPNI with a buffer pool list. Buffer pool list
328 	 * handle is passed in blist.
329 	 */
330 	int32_t retcode;
331 	struct fsl_mc_io *dpni = priv->hw;
332 	struct dpni_pools_cfg bpool_cfg;
333 	struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
334 	struct dpni_buffer_layout layout;
335 	int tot_size;
336 
337 	/* ... rx buffer layout .
338 	 * Check alignment for buffer layouts first
339 	 */
340 
341 	/* ... rx buffer layout ... */
342 	tot_size = RTE_PKTMBUF_HEADROOM;
343 	tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
344 
345 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
346 	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
347 			 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
348 			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
349 			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
350 			 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
351 			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
352 
353 	layout.pass_timestamp = true;
354 	layout.pass_frame_status = 1;
355 	layout.private_data_size = DPAA2_FD_PTA_SIZE;
356 	layout.pass_parser_result = 1;
357 	layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
358 	layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
359 				DPAA2_MBUF_HW_ANNOTATION;
360 	retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
361 					 DPNI_QUEUE_RX, &layout);
362 	if (retcode) {
363 		DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
364 			     retcode);
365 		return retcode;
366 	}
367 
368 	/*Attach buffer pool to the network interface as described by the user*/
369 	memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
370 	bpool_cfg.num_dpbp = 1;
371 	bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
372 	bpool_cfg.pools[0].backup_pool = 0;
373 	bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
374 						DPAA2_PACKET_LAYOUT_ALIGN);
375 	bpool_cfg.pools[0].priority_mask = 0;
376 
377 	retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
378 	if (retcode != 0) {
379 		DPAA2_PMD_ERR("Error configuring buffer pool on interface."
380 			      " bpid = %d error code = %d",
381 			      bpool_cfg.pools[0].dpbp_id, retcode);
382 		return retcode;
383 	}
384 
385 	priv->bp_list = bp_list;
386 	return 0;
387 }
388