/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2022 NXP
 *
 */

#ifndef _DPAA2_ETHDEV_H
#define _DPAA2_ETHDEV_H

#include <rte_compat.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>

#include <bus_fslmc_driver.h>
#include <dpaa2_hw_pvt.h>
#include "dpaa2_tm.h"

#include <mc/fsl_dpni.h>
#include <mc/fsl_mc_sys.h>

#include "base/dpaa2_hw_dpni_annot.h"

#define DPAA2_MIN_RX_BUF_SIZE 512
#define DPAA2_MAX_RX_PKT_LEN  10240 /* WRIOP support */
#define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2

#define MAX_TCS			DPNI_MAX_TC
#define MAX_RX_QUEUES		128
#define MAX_TX_QUEUES		16
#define MAX_DPNI		8
#define DPAA2_MAX_CHANNELS	16

#define DPAA2_EXTRACT_PARAM_MAX_SIZE 256
#define DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE 256

#define DPAA2_RX_DEFAULT_NBDESC 512

#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
			   RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
			   VLAN_TAG_SIZE)
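/*
 * For orientation: with the standard rte_ether sizes this works out to
 * 1500 (RTE_ETHER_MTU) + 14 (RTE_ETHER_HDR_LEN) + 4 (RTE_ETHER_CRC_LEN) +
 * 4 (VLAN_TAG_SIZE) = 1522 bytes, i.e. a single VLAN-tagged frame at the
 * default MTU (assuming VLAN_TAG_SIZE is the usual 4-byte tag).
 */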

/* Default TC to be used for congestion, distribution etc. configuration. */
#define DPAA2_DEF_TC		0

/* Threshold for a Tx queue to *Enter* Congestion state.
 */
#define CONG_ENTER_TX_THRESHOLD   512

/* Threshold for a queue to *Exit* Congestion state.
 */
#define CONG_EXIT_TX_THRESHOLD    480

#define CONG_RETRY_COUNT 18000

/* Rx queue tail drop threshold;
 * currently sized assuming 64 KB packets.
 */
#define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
#define CONG_RX_OAL	128

/* Size of the input SMMU mapped memory required by MC */
#define DIST_PARAM_IOVA_SIZE 256

/* Enable Tx congestion control support;
 * disabled by default.
 */
#define DPAA2_TX_CGR_OFF	0x01

/* Disable Rx tail drop; enabled by default */
#define DPAA2_RX_TAILDROP_OFF	0x04
/* Tx confirmation enabled */
#define DPAA2_TX_CONF_ENABLE	0x06

/* DPDMUX index for DPMAC */
#define DPAA2_DPDMUX_DPMAC_IDX 0

/* HW loopback of egress traffic to its own ingress */
#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20

#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40

#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80

#define DPAA2_TX_LOOPBACK_MODE \
	(DPAA2_TX_MAC_LOOPBACK_MODE | \
	DPAA2_TX_SERDES_LOOPBACK_MODE | \
	DPAA2_TX_DPNI_LOOPBACK_MODE)

#define DPAA2_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_IP | \
	RTE_ETH_RSS_UDP | \
	RTE_ETH_RSS_TCP | \
	RTE_ETH_RSS_SCTP | \
	RTE_ETH_RSS_MPLS | \
	RTE_ETH_RSS_C_VLAN | \
	RTE_ETH_RSS_S_VLAN | \
	RTE_ETH_RSS_ESP | \
	RTE_ETH_RSS_AH | \
	RTE_ETH_RSS_PPPOE)
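/*
 * Illustrative sketch only (not part of this header's API): an application
 * would typically request this hash mask through the generic ethdev RSS
 * configuration, e.g.
 *
 *   struct rte_eth_conf conf = {
 *       .rxmode.mq_mode = RTE_ETH_MQ_RX_RSS,
 *       .rx_adv_conf.rss_conf.rss_hf = DPAA2_RSS_OFFLOAD_ALL,
 *   };
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * Field names follow the standard rte_ethdev API; port_id, nb_rxq and
 * nb_txq are placeholders, and the subset of RSS types an application
 * actually enables is its own choice.
 */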

/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER		0x0060
#define DPAA2_PKT_TYPE_IPV4		0x0000
#define DPAA2_PKT_TYPE_IPV6		0x0020
#define DPAA2_PKT_TYPE_IPV4_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_EXT \
			(0x0001 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_TCP \
			(0x000e | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_UDP \
			(0x0010 | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV4)
#define DPAA2_PKT_TYPE_IPV6_SCTP	\
			(0x000f | DPAA2_PKT_TYPE_IPV6)
#define DPAA2_PKT_TYPE_IPV4_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
#define DPAA2_PKT_TYPE_IPV6_ICMP \
			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
#define DPAA2_PKT_TYPE_VLAN_1		0x0160
#define DPAA2_PKT_TYPE_VLAN_2		0x0260

/* Global pool used by the driver for SG list Tx */
extern struct rte_mempool *dpaa2_tx_sg_pool;
/* Maximum SG segments */
#define DPAA2_MAX_SGS 128
/* SG pool size */
#define DPAA2_POOL_SIZE 2048
/* SG pool cache size */
#define DPAA2_POOL_CACHE_SIZE 256
/* Structure used to free external and indirect buffers. */
struct sw_buf_free {
	/* To which packet this segment belongs */
	uint16_t pkt_id;
	/* The actual segment */
	struct rte_mbuf *seg;
};

/* Enable timestamp in mbuf */
extern bool dpaa2_enable_ts[];
extern uint64_t dpaa2_timestamp_rx_dynflag;
extern int dpaa2_timestamp_dynfield_offset;

/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;

extern const struct rte_tm_ops dpaa2_tm_ops;

extern bool dpaa2_enable_err_queue;

extern bool dpaa2_print_parser_result;

#define DPAA2_FAPR_SIZE \
	(sizeof(struct dpaa2_annot_hdr) - \
	offsetof(struct dpaa2_annot_hdr, word3))

#define DPAA2_PR_NXTHDR_OFFSET 0

#define DPAA2_FAFE_PSR_OFFSET 2
#define DPAA2_FAFE_PSR_SIZE 2

#define DPAA2_FAF_PSR_OFFSET 4
#define DPAA2_FAF_PSR_SIZE 12

#define DPAA2_FAF_TOTAL_SIZE \
	(DPAA2_FAFE_PSR_SIZE + DPAA2_FAF_PSR_SIZE)

/* Only the most commonly used Frame Attribute Flags (FAF) are listed here. */
enum dpaa2_rx_faf_offset {
	/* Set by SP - start */
	FAFE_VXLAN_IN_VLAN_FRAM = 0,
	FAFE_VXLAN_IN_IPV4_FRAM = 1,
	FAFE_VXLAN_IN_IPV6_FRAM = 2,
	FAFE_VXLAN_IN_UDP_FRAM = 3,
	FAFE_VXLAN_IN_TCP_FRAM = 4,

	FAFE_ECPRI_FRAM = 7,
	/* Set by SP - end */

	FAF_GTP_PRIMED_FRAM = 1 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_PTP_FRAM = 3 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_VXLAN_FRAM = 4 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ETH_FRAM = 10 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_LLC_SNAP_FRAM = 18 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_VLAN_FRAM = 21 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_PPPOE_PPP_FRAM = 25 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_MPLS_FRAM = 27 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ARP_FRAM = 30 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPV4_FRAM = 34 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPV6_FRAM = 42 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IP_FRAM = 48 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ICMP_FRAM = 57 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IGMP_FRAM = 58 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_GRE_FRAM = 65 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_UDP_FRAM = 70 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_TCP_FRAM = 72 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_FRAM = 77 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_ESP_FRAM = 78 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_AH_FRAM = 79 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_SCTP_FRAM = 81 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_DCCP_FRAM = 83 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_GTP_FRAM = 87 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ESP_FRAM = 89 + DPAA2_FAFE_PSR_SIZE * 8,
};
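/*
 * Note on the arithmetic above: the FAFE field occupies DPAA2_FAFE_PSR_SIZE
 * (2) bytes ahead of the FAF field, so the FAF bit positions are expressed
 * relative to the start of the combined FAFE + FAF region by adding
 * DPAA2_FAFE_PSR_SIZE * 8 = 16; e.g. FAF_IPV4_FRAM evaluates to 34 + 16 = 50.
 */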

enum dpaa2_ecpri_fafe_type {
	ECPRI_FAFE_TYPE_0 = (8 - FAFE_ECPRI_FRAM),
	ECPRI_FAFE_TYPE_1 = (8 - FAFE_ECPRI_FRAM) | (1 << 1),
	ECPRI_FAFE_TYPE_2 = (8 - FAFE_ECPRI_FRAM) | (2 << 1),
	ECPRI_FAFE_TYPE_3 = (8 - FAFE_ECPRI_FRAM) | (3 << 1),
	ECPRI_FAFE_TYPE_4 = (8 - FAFE_ECPRI_FRAM) | (4 << 1),
	ECPRI_FAFE_TYPE_5 = (8 - FAFE_ECPRI_FRAM) | (5 << 1),
	ECPRI_FAFE_TYPE_6 = (8 - FAFE_ECPRI_FRAM) | (6 << 1),
	ECPRI_FAFE_TYPE_7 = (8 - FAFE_ECPRI_FRAM) | (7 << 1)
};

#define DPAA2_PR_ETH_OFF_OFFSET 19
#define DPAA2_PR_TCI_OFF_OFFSET 21
#define DPAA2_PR_LAST_ETYPE_OFFSET 23
#define DPAA2_PR_L3_OFF_OFFSET 27
#define DPAA2_PR_L4_OFF_OFFSET 30
#define DPAA2_PR_L5_OFF_OFFSET 31
#define DPAA2_PR_NXTHDR_OFF_OFFSET 34

/* Set by SP for VXLAN distribution - start */
#define DPAA2_VXLAN_IN_TCI_OFFSET 16

#define DPAA2_VXLAN_IN_DADDR0_OFFSET 20
#define DPAA2_VXLAN_IN_DADDR1_OFFSET 22
#define DPAA2_VXLAN_IN_DADDR2_OFFSET 24
#define DPAA2_VXLAN_IN_DADDR3_OFFSET 25
#define DPAA2_VXLAN_IN_DADDR4_OFFSET 26
#define DPAA2_VXLAN_IN_DADDR5_OFFSET 28

#define DPAA2_VXLAN_IN_SADDR0_OFFSET 29
#define DPAA2_VXLAN_IN_SADDR1_OFFSET 32
#define DPAA2_VXLAN_IN_SADDR2_OFFSET 33
#define DPAA2_VXLAN_IN_SADDR3_OFFSET 35
#define DPAA2_VXLAN_IN_SADDR4_OFFSET 41
#define DPAA2_VXLAN_IN_SADDR5_OFFSET 42

#define DPAA2_VXLAN_VNI_OFFSET 43
#define DPAA2_VXLAN_IN_TYPE_OFFSET 46
/* Set by SP for VXLAN distribution - end */

/* ECPRI shares SP context with VXLAN */
#define DPAA2_ECPRI_MSG_OFFSET DPAA2_VXLAN_VNI_OFFSET

#define DPAA2_ECPRI_MAX_EXTRACT_NB 8

struct ipv4_sd_addr_extract_rule {
	uint32_t ipv4_src;
	uint32_t ipv4_dst;
};

struct ipv6_sd_addr_extract_rule {
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
};

struct ipv4_ds_addr_extract_rule {
	uint32_t ipv4_dst;
	uint32_t ipv4_src;
};

struct ipv6_ds_addr_extract_rule {
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
};

union ip_addr_extract_rule {
	struct ipv4_sd_addr_extract_rule ipv4_sd_addr;
	struct ipv6_sd_addr_extract_rule ipv6_sd_addr;
	struct ipv4_ds_addr_extract_rule ipv4_ds_addr;
	struct ipv6_ds_addr_extract_rule ipv6_ds_addr;
};

union ip_src_addr_extract_rule {
	uint32_t ipv4_src;
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
};

union ip_dst_addr_extract_rule {
	uint32_t ipv4_dst;
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
};

enum ip_addr_extract_type {
	IP_NONE_ADDR_EXTRACT,
	IP_SRC_EXTRACT,
	IP_DST_EXTRACT,
	IP_SRC_DST_EXTRACT,
	IP_DST_SRC_EXTRACT
};

enum key_prot_type {
	/* HW extracts from standard protocol fields */
	DPAA2_NET_PROT_KEY,
	/* HW extracts from the FAF field of the parse results */
	DPAA2_FAF_KEY,
	/* HW extracts from parse results other than FAF */
	DPAA2_PR_KEY
};

struct key_prot_field {
	enum key_prot_type type;
	enum net_prot prot;
	uint32_t key_field;
};

struct dpaa2_raw_region {
	uint8_t raw_start;
	uint8_t raw_size;
};

struct dpaa2_key_profile {
	uint8_t num;
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];

	enum ip_addr_extract_type ip_addr_type;
	uint8_t ip_addr_extract_pos;
	uint8_t ip_addr_extract_off;

	uint8_t raw_extract_pos;
	uint8_t raw_extract_off;
	uint8_t raw_extract_num;

	uint8_t l4_src_port_present;
	uint8_t l4_src_port_pos;
	uint8_t l4_src_port_offset;
	uint8_t l4_dst_port_present;
	uint8_t l4_dst_port_pos;
	uint8_t l4_dst_port_offset;
	struct key_prot_field prot_field[DPKG_MAX_NUM_OF_EXTRACTS];
	uint16_t key_max_size;
	struct dpaa2_raw_region raw_region;
};

struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg;
	struct dpaa2_key_profile key_profile;
};

struct extract_s {
	struct dpaa2_key_extract qos_key_extract;
	struct dpaa2_key_extract tc_key_extract[MAX_TCS];
	uint8_t *qos_extract_param;
	uint8_t *tc_extract_param[MAX_TCS];
};

struct dpaa2_dev_priv {
	void *hw;
	int32_t hw_id;
	int32_t qdid;
	uint16_t token;
	uint8_t nb_tx_queues;
	uint8_t nb_rx_queues;
	uint32_t options;
	void *rx_vq[MAX_RX_QUEUES];
	void *tx_vq[MAX_TX_QUEUES];
	struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
	void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
	void *rx_err_vq;
	uint8_t flags; /* dpaa2 config flags */
	uint8_t max_mac_filters;
	uint8_t max_vlan_filters;
	uint8_t num_rx_tc;
	uint8_t num_tx_tc;
	uint16_t qos_entries;
	uint16_t fs_entries;
	uint8_t dist_queues;
	uint8_t num_channels;
	uint8_t en_ordered;
	uint8_t en_loose_ordered;
	uint8_t max_cgs;
	uint8_t cgid_in_use[MAX_RX_QUEUES];

	struct extract_s extract;

	uint16_t ss_offset;
	uint64_t ss_iova;
	uint64_t ss_param_iova;
	/* Stores the timestamp of the last packet received on the device */
	uint64_t rx_timestamp;
	/* Stores the timestamp of the last Tx confirmation packet received on the device */
	uint64_t tx_timestamp;
	/* Pointer to the next tx_conf queue to be processed;
	 * it corresponds to the last packet transmitted.
	 */
	struct dpaa2_queue *next_tx_conf_queue;

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
	rte_spinlock_t lpbk_qp_lock;

	uint8_t channel_inuse;
	/* Stores correction offset for one-step timestamping */
	uint16_t ptp_correction_offset;

	struct dpaa2_dev_flow *curr;
	LIST_HEAD(, dpaa2_dev_flow) flows;
	LIST_HEAD(nodes, dpaa2_tm_node) nodes;
	LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
};

int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
				      struct dpkg_profile_cfg *kg_cfg);

int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		uint64_t req_dist_set, int tc_index);
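/*
 * Illustrative sketch only (this is an internal driver helper, not a public
 * API): the ethdev RSS configuration path ends up calling something like
 *
 *   ret = dpaa2_setup_flow_dist(eth_dev,
 *                               RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
 *                               DPAA2_DEF_TC);
 *
 * where req_dist_set is a mask of RTE_ETH_RSS_* bits and tc_index selects
 * the traffic class being configured. The exact call site shown here is an
 * assumption for illustration.
 */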

int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
			   uint8_t tc_index);

int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
	struct fsl_mc_io *dpni, void *blist);

__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

__rte_internal
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);
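/*
 * Illustrative sketch only: these two internal hooks back the event eth Rx
 * adapter. An application normally goes through the generic adapter API
 * rather than calling them directly, e.g.
 *
 *   struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *       .ev.queue_id = ev_queue_id,
 *       .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *   };
 *   rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id,
 *                                      rx_queue_id, &qconf);
 *
 * The adapter then resolves the DPCON and invokes dpaa2_eth_eventq_attach()
 * for the given Rx queue; the variable names above are placeholders.
 */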

uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);

uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
				uint16_t nb_pkts);

uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
			       uint16_t nb_pkts);
void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				      const struct qbman_fd *fd,
				      const struct qbman_result *dq,
				      struct dpaa2_queue *rxq,
				      struct rte_event *ev);
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev);
void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				     const struct qbman_fd *fd,
				     const struct qbman_result *dq,
				     struct dpaa2_queue *rxq,
				     struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
			      uint16_t nb_pkts);
__rte_internal
uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
		struct rte_mbuf **bufs, uint16_t nb_pkts);

void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, struct dpaa2_queue *dpaa2_q);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
uint16_t dpaa2_dev_tx_conf(void *queue) __rte_unused;

int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
					struct timespec *timestamp);
int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
					const struct timespec *timestamp);
int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
						struct timespec *timestamp,
						uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
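/*
 * Illustrative sketch only: the timesync handlers above implement the
 * generic ethdev IEEE 1588 ops, so an application normally reaches them
 * through the rte_ethdev wrappers, e.g.
 *
 *   rte_eth_timesync_enable(port_id);
 *   ...
 *   struct timespec ts;
 *   if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *       process_rx_timestamp(&ts);
 *
 * port_id and process_rx_timestamp() are placeholders for illustration.
 */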

int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
int dpaa2_soft_parser_loaded(void);

int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
	uint16_t qidx, uint64_t cntx,
	eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
	struct dpaa2_queue **txq,
	struct dpaa2_queue **rxq);

#endif /* _DPAA2_ETHDEV_H */