/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2022 NXP
 *
 */

#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H

#include <rte_compat.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>

#include <bus_fslmc_driver.h>
#include <dpaa2_hw_pvt.h>
#include "dpaa2_tm.h"

#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>

#include "base/dpaa2_hw_dpni_annot.h"

#define DPAA2_MIN_RX_BUF_SIZE 512
#define DPAA2_MAX_RX_PKT_LEN  10240 /* WRIOP support */
#define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2

#define MAX_TCS			DPNI_MAX_TC
#define MAX_RX_QUEUES		128
#define MAX_TX_QUEUES		16
#define MAX_DPNI		8
#define DPAA2_MAX_CHANNELS	16

#define DPAA2_RX_DEFAULT_NBDESC 512

#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
			   RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			   VLAN_TAG_SIZE)
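/* For the standard Ethernet MTU this expands to
 * 1500 (RTE_ETHER_MTU) + 14 (RTE_ETHER_HDR_LEN) + 4 (RTE_ETHER_CRC_LEN) +
 * 4 (VLAN_TAG_SIZE) = 1522 bytes, i.e. the largest single VLAN-tagged
 * frame accepted at the default MTU.
 */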

/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC		0

/* Threshold for a Tx queue to *Enter* Congestion state.
 */
#define CONG_ENTER_TX_THRESHOLD   512

/* Threshold for a queue to *Exit* Congestion state.
 */
#define CONG_EXIT_TX_THRESHOLD    480

#define CONG_RETRY_COUNT 18000

/* RX queue tail drop threshold
 * currently considering 64 KB packets
 */
#define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
#define CONG_RX_OAL	128

/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256

/* Enable TX Congestion control support,
 * default is disabled.
 */
#define DPAA2_TX_CGR_OFF	0x01

/* Disable RX tail drop, default is enabled */
#define DPAA2_RX_TAILDROP_OFF	0x04
/* Tx confirmation enabled */
#define DPAA2_TX_CONF_ENABLE	0x06

/* DPDMUX index for DPMAC */
#define DPAA2_DPDMUX_DPMAC_IDX 0

/* HW loopback of the egress traffic back to its own ingress */
#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20

#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40

#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80

#define DPAA2_TX_LOOPBACK_MODE \
	(DPAA2_TX_MAC_LOOPBACK_MODE | \
	DPAA2_TX_SERDES_LOOPBACK_MODE | \
	DPAA2_TX_DPNI_LOOPBACK_MODE)

#define DPAA2_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_IP | \
	RTE_ETH_RSS_UDP | \
	RTE_ETH_RSS_TCP | \
	RTE_ETH_RSS_SCTP | \
	RTE_ETH_RSS_MPLS | \
	RTE_ETH_RSS_C_VLAN | \
	RTE_ETH_RSS_S_VLAN | \
	RTE_ETH_RSS_ESP | \
	RTE_ETH_RSS_AH | \
	RTE_ETH_RSS_PPPOE)
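/* Illustrative only: an application would typically request this full RSS
 * set through the generic ethdev configuration, along these lines (sketch,
 * not part of this header):
 *
 *   struct rte_eth_conf conf = {
 *           .rxmode.mq_mode = RTE_ETH_MQ_RX_RSS,
 *           .rx_adv_conf.rss_conf.rss_hf = DPAA2_RSS_OFFLOAD_ALL,
 *   };
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */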

/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER		0x0060
#define DPAA2_PKT_TYPE_IPV4		0x0000
#define DPAA2_PKT_TYPE_IPV6		0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
#define DPAA2_PKT_TYPE_VLAN_1		0x0160
#define DPAA2_PKT_TYPE_VLAN_2		0x0260

/* Global pool used by driver for SG list TX */
extern struct rte_mempool *dpaa2_tx_sg_pool;
/* Maximum SG segments */
#define DPAA2_MAX_SGS 128
/* SG pool size */
#define DPAA2_POOL_SIZE 2048
/* SG pool cache size */
#define DPAA2_POOL_CACHE_SIZE 256
/* Structure used to free external and indirect
 * buffers.
 */
struct sw_buf_free {
	/* To which packet this segment belongs */
	uint16_t pkt_id;
	/* The actual segment */
	struct rte_mbuf *seg;
};

/* Enable timestamp in mbuf */
extern bool dpaa2_enable_ts[];
extern uint64_t dpaa2_timestamp_rx_dynflag;
extern int dpaa2_timestamp_dynfield_offset;

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;

extern const struct rte_tm_ops dpaa2_tm_ops;

extern bool dpaa2_enable_err_queue;

extern bool dpaa2_print_parser_result;

#define DPAA2_FAPR_SIZE \
	(sizeof(struct dpaa2_annot_hdr) - \
	offsetof(struct dpaa2_annot_hdr, word3))

#define DPAA2_PR_NXTHDR_OFFSET 0

#define DPAA2_FAFE_PSR_OFFSET 2
#define DPAA2_FAFE_PSR_SIZE 2

#define DPAA2_FAF_PSR_OFFSET 4
#define DPAA2_FAF_PSR_SIZE 12

#define DPAA2_FAF_TOTAL_SIZE \
	(DPAA2_FAFE_PSR_SIZE + DPAA2_FAF_PSR_SIZE)

/* Only the most commonly used frame attribute flags (FAF) are listed here. */
enum dpaa2_rx_faf_offset {
	/* Set by SP start */
	FAFE_VXLAN_IN_VLAN_FRAM = 0,
	FAFE_VXLAN_IN_IPV4_FRAM = 1,
	FAFE_VXLAN_IN_IPV6_FRAM = 2,
	FAFE_VXLAN_IN_UDP_FRAM = 3,
	FAFE_VXLAN_IN_TCP_FRAM = 4,

	FAFE_ECPRI_FRAM = 7,
	/* Set by SP end */

	FAF_GTP_PRIMED_FRAM = 1 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_PTP_FRAM = 3 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_VXLAN_FRAM = 4 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ETH_FRAM = 10 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_LLC_SNAP_FRAM = 18 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_VLAN_FRAM = 21 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_PPPOE_PPP_FRAM = 25 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_MPLS_FRAM = 27 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ARP_FRAM = 30 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPV4_FRAM = 34 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPV6_FRAM = 42 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IP_FRAM = 48 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ICMP_FRAM = 57 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IGMP_FRAM = 58 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_GRE_FRAM = 65 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_UDP_FRAM = 70 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_TCP_FRAM = 72 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_FRAM = 77 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_ESP_FRAM = 78 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_AH_FRAM = 79 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_SCTP_FRAM = 81 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_DCCP_FRAM = 83 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_GTP_FRAM = 87 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ESP_FRAM = 89 + DPAA2_FAFE_PSR_SIZE * 8,
};
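/* A minimal sketch of how these bit positions could be tested, assuming the
 * combined FAFE + FAF region starts at DPAA2_FAFE_PSR_OFFSET within the
 * parse result and that bit 0 maps to the most significant bit of each byte
 * (both are assumptions to be confirmed against the WRIOP parser spec;
 * dpaa2_faf_bit_is_set() is a hypothetical helper, not part of this header):
 *
 *   static inline int
 *   dpaa2_faf_bit_is_set(const uint8_t *parse_result, unsigned int pos)
 *   {
 *           const uint8_t *faf = parse_result + DPAA2_FAFE_PSR_OFFSET;
 *
 *           return !!(faf[pos / 8] & (1 << (7 - (pos % 8))));
 *   }
 */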

enum dpaa2_ecpri_fafe_type {
	ECPRI_FAFE_TYPE_0 = (8 - FAFE_ECPRI_FRAM),
	ECPRI_FAFE_TYPE_1 = (8 - FAFE_ECPRI_FRAM) | (1 << 1),
	ECPRI_FAFE_TYPE_2 = (8 - FAFE_ECPRI_FRAM) | (2 << 1),
	ECPRI_FAFE_TYPE_3 = (8 - FAFE_ECPRI_FRAM) | (3 << 1),
	ECPRI_FAFE_TYPE_4 = (8 - FAFE_ECPRI_FRAM) | (4 << 1),
	ECPRI_FAFE_TYPE_5 = (8 - FAFE_ECPRI_FRAM) | (5 << 1),
	ECPRI_FAFE_TYPE_6 = (8 - FAFE_ECPRI_FRAM) | (6 << 1),
	ECPRI_FAFE_TYPE_7 = (8 - FAFE_ECPRI_FRAM) | (7 << 1)
};

#define DPAA2_PR_ETH_OFF_OFFSET 19
#define DPAA2_PR_TCI_OFF_OFFSET 21
#define DPAA2_PR_LAST_ETYPE_OFFSET 23
#define DPAA2_PR_L3_OFF_OFFSET 27
#define DPAA2_PR_L4_OFF_OFFSET 30
#define DPAA2_PR_L5_OFF_OFFSET 31
#define DPAA2_PR_NXTHDR_OFF_OFFSET 34

/* Set by SP for vxlan distribution start */
#define DPAA2_VXLAN_IN_TCI_OFFSET 16

#define DPAA2_VXLAN_IN_DADDR0_OFFSET 20
#define DPAA2_VXLAN_IN_DADDR1_OFFSET 22
#define DPAA2_VXLAN_IN_DADDR2_OFFSET 24
#define DPAA2_VXLAN_IN_DADDR3_OFFSET 25
#define DPAA2_VXLAN_IN_DADDR4_OFFSET 26
#define DPAA2_VXLAN_IN_DADDR5_OFFSET 28

#define DPAA2_VXLAN_IN_SADDR0_OFFSET 29
#define DPAA2_VXLAN_IN_SADDR1_OFFSET 32
#define DPAA2_VXLAN_IN_SADDR2_OFFSET 33
#define DPAA2_VXLAN_IN_SADDR3_OFFSET 35
#define DPAA2_VXLAN_IN_SADDR4_OFFSET 41
#define DPAA2_VXLAN_IN_SADDR5_OFFSET 42

#define DPAA2_VXLAN_VNI_OFFSET 43
#define DPAA2_VXLAN_IN_TYPE_OFFSET 46
/* Set by SP for vxlan distribution end */

/* ECPRI shares SP context with VXLAN */
#define DPAA2_ECPRI_MSG_OFFSET DPAA2_VXLAN_VNI_OFFSET

#define DPAA2_ECPRI_MAX_EXTRACT_NB 8

struct ipv4_sd_addr_extract_rule {
	uint32_t ipv4_src;
	uint32_t ipv4_dst;
};

struct ipv6_sd_addr_extract_rule {
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
};

struct ipv4_ds_addr_extract_rule {
	uint32_t ipv4_dst;
	uint32_t ipv4_src;
};

struct ipv6_ds_addr_extract_rule {
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
};

union ip_addr_extract_rule {
	struct ipv4_sd_addr_extract_rule ipv4_sd_addr;
	struct ipv6_sd_addr_extract_rule ipv6_sd_addr;
	struct ipv4_ds_addr_extract_rule ipv4_ds_addr;
	struct ipv6_ds_addr_extract_rule ipv6_ds_addr;
};

union ip_src_addr_extract_rule {
	uint32_t ipv4_src;
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
};

union ip_dst_addr_extract_rule {
	uint32_t ipv4_dst;
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
};

enum ip_addr_extract_type {
	IP_NONE_ADDR_EXTRACT,
	IP_SRC_EXTRACT,
	IP_DST_EXTRACT,
	IP_SRC_DST_EXTRACT,
	IP_DST_SRC_EXTRACT
};

enum key_prot_type {
	/* HW extracts from standard protocol fields */
	DPAA2_NET_PROT_KEY,
	/* HW extracts from FAF of PR */
	DPAA2_FAF_KEY,
	/* HW extracts from PR other than FAF */
	DPAA2_PR_KEY
};

struct key_prot_field {
	enum key_prot_type type;
	enum net_prot prot;
	uint32_t key_field;
};

struct dpaa2_raw_region {
	uint8_t raw_start;
	uint8_t raw_size;
};

struct dpaa2_key_profile {
	uint8_t num;
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];

	enum ip_addr_extract_type ip_addr_type;
	uint8_t ip_addr_extract_pos;
	uint8_t ip_addr_extract_off;

	uint8_t raw_extract_pos;
	uint8_t raw_extract_off;
	uint8_t raw_extract_num;

	uint8_t l4_src_port_present;
	uint8_t l4_src_port_pos;
	uint8_t l4_src_port_offset;
	uint8_t l4_dst_port_present;
	uint8_t l4_dst_port_pos;
	uint8_t l4_dst_port_offset;
	struct key_prot_field prot_field[DPKG_MAX_NUM_OF_EXTRACTS];
	uint16_t key_max_size;
	struct dpaa2_raw_region raw_region;
};

struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg;
	struct dpaa2_key_profile key_profile;
};

struct extract_s {
	struct dpaa2_key_extract qos_key_extract;
	struct dpaa2_key_extract tc_key_extract[MAX_TCS];
	uint8_t *qos_extract_param;
	uint8_t *tc_extract_param[MAX_TCS];
};

struct dpaa2_dev_priv {
	void *hw;
	int32_t hw_id;
	int32_t qdid;
	uint16_t token;
	uint8_t nb_tx_queues;
	uint8_t nb_rx_queues;
	uint32_t options;
	void *rx_vq[MAX_RX_QUEUES];
	void *tx_vq[MAX_TX_QUEUES];
	struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
	void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
	void *rx_err_vq;
	uint8_t flags; /* DPAA2 config flags */
	uint8_t max_mac_filters;
	uint8_t max_vlan_filters;
	uint8_t num_rx_tc;
	uint8_t num_tx_tc;
	uint16_t qos_entries;
	uint16_t fs_entries;
	uint8_t dist_queues;
	uint8_t num_channels;
	uint8_t en_ordered;
	uint8_t en_loose_ordered;
	uint8_t max_cgs;
	uint8_t cgid_in_use[MAX_RX_QUEUES];

	struct extract_s extract;

	uint16_t ss_offset;
	uint64_t ss_iova;
	uint64_t ss_param_iova;
	/* Stores the timestamp of the last packet received on this device */
	uint64_t rx_timestamp;
	/* Stores the timestamp of the last Tx confirmation packet received on this device */
	uint64_t tx_timestamp;
	/* Stores a pointer to the next tx_conf queue that should be processed;
	 * it corresponds to the last packet transmitted.
	 */
	struct dpaa2_queue *next_tx_conf_queue;

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
	rte_spinlock_t lpbk_qp_lock;

	uint8_t channel_inuse;
	/* Stores correction offset for one-step timestamping */
	uint16_t ptp_correction_offset;

	struct dpaa2_dev_flow *curr;
	LIST_HEAD(, dpaa2_dev_flow) flows;
	LIST_HEAD(nodes, dpaa2_tm_node) nodes;
	LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
};
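/* The per-port private data is reached through the standard ethdev private
 * pointer. A minimal sketch of the usual access pattern inside the PMD
 * (assuming, as the dpni_* calls elsewhere in the driver suggest, that
 * priv->hw holds the MC I/O handle):
 *
 *   struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
 *   struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
 *   struct dpni_link_state state;
 *
 *   dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
 */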

int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		uint64_t req_dist_set, int tc_index);

int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
	struct fsl_mc_io *dpni, void *blist);

__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

__rte_internal
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
				uint16_t nb_pkts);

uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
__rte_internal
uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts);

void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, struct dpaa2_queue *dpaa2_q);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
uint16_t dpaa2_dev_tx_conf(void *queue)  __rte_unused;

int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
					struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
					const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
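/* These implement the standard ethdev timesync callbacks; applications do
 * not call them directly but go through the generic API, e.g.:
 *
 *   struct timespec ts;
 *
 *   rte_eth_timesync_enable(port_id);
 *   rte_eth_timesync_read_time(port_id, &ts);
 */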

int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
	uint16_t qidx, uint64_t cntx,
	eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
	struct dpaa2_queue **txq,
	struct dpaa2_queue **rxq);

#endif /* _DPAA2_ETHDEV_H */