xref: /dpdk/drivers/net/dpaa2/dpaa2_ethdev.h (revision 25e5845b5272764d8c2cbf64a9fc5989b34a932c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2022 NXP
5  *
6  */
7 
8 #ifndef _DPAA2_ETHDEV_H
9 #define _DPAA2_ETHDEV_H
10 
11 #include <rte_compat.h>
12 #include <rte_event_eth_rx_adapter.h>
13 #include <rte_pmd_dpaa2.h>
14 
15 #include <bus_fslmc_driver.h>
16 #include <dpaa2_hw_pvt.h>
17 #include "dpaa2_tm.h"
18 
19 #include <mc/fsl_dpni.h>
20 #include <mc/fsl_mc_sys.h>
21 
22 #include "base/dpaa2_hw_dpni_annot.h"
23 
24 #define DPAA2_MIN_RX_BUF_SIZE 512
25 #define DPAA2_MAX_RX_PKT_LEN  10240 /*WRIOP support*/
26 #define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2
27 
28 #define MAX_TCS			DPNI_MAX_TC
29 #define MAX_RX_QUEUES		128
30 #define MAX_TX_QUEUES		16
31 #define MAX_DPNI		8
32 #define DPAA2_MAX_CHANNELS	16
33 
34 #define DPAA2_EXTRACT_PARAM_MAX_SIZE 256
35 #define DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE 256
36 
37 #define DPAA2_RX_DEFAULT_NBDESC 512
38 
39 #define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
40 			   RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
41 			   VLAN_TAG_SIZE)
42 
/* Default TC to be used for congestion, distribution etc. configuration. */
44 #define DPAA2_DEF_TC		0
45 
46 /* Threshold for a Tx queue to *Enter* Congestion state.
47  */
48 #define CONG_ENTER_TX_THRESHOLD   512
49 
50 /* Threshold for a queue to *Exit* Congestion state.
51  */
52 #define CONG_EXIT_TX_THRESHOLD    480
53 
54 #define CONG_RETRY_COUNT 18000
55 
56 /* RX queue tail drop threshold
57  * currently considering 64 KB packets
58  */
59 #define CONG_THRESHOLD_RX_BYTES_Q  (64 * 1024)
60 #define CONG_RX_OAL	128
61 
62 /* Size of the input SMMU mapped memory required by MC */
63 #define DIST_PARAM_IOVA_SIZE 256
64 
65 /* Enable TX Congestion control support
66  * default is disable
67  */
68 #define DPAA2_TX_CGR_OFF	0x01
69 
70 /* Disable RX tail drop, default is enable */
71 #define DPAA2_RX_TAILDROP_OFF	0x04
/* Tx confirmation enabled.
 * NOTE(review): value 0x06 sets two bits and overlaps
 * DPAA2_RX_TAILDROP_OFF (0x04) — confirm this flag value is intentional.
 */
73 #define DPAA2_TX_CONF_ENABLE	0x06
74 
75 /* DPDMUX index for DPMAC */
76 #define DPAA2_DPDMUX_DPMAC_IDX 0
77 
/* HW loops the egress traffic back to its own ingress */
79 #define DPAA2_TX_MAC_LOOPBACK_MODE 0x20
80 
81 #define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40
82 
83 #define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80
84 
85 #define DPAA2_TX_LOOPBACK_MODE \
86 	(DPAA2_TX_MAC_LOOPBACK_MODE | \
87 	DPAA2_TX_SERDES_LOOPBACK_MODE | \
88 	DPAA2_TX_DPNI_LOOPBACK_MODE)
89 
90 #define DPAA2_RSS_OFFLOAD_ALL ( \
91 	RTE_ETH_RSS_L2_PAYLOAD | \
92 	RTE_ETH_RSS_IP | \
93 	RTE_ETH_RSS_UDP | \
94 	RTE_ETH_RSS_TCP | \
95 	RTE_ETH_RSS_SCTP | \
96 	RTE_ETH_RSS_MPLS | \
97 	RTE_ETH_RSS_C_VLAN | \
98 	RTE_ETH_RSS_S_VLAN | \
99 	RTE_ETH_RSS_ESP | \
100 	RTE_ETH_RSS_AH | \
101 	RTE_ETH_RSS_PPPOE)
102 
103 /* LX2 FRC Parsed values (Little Endian) */
104 #define DPAA2_PKT_TYPE_ETHER		0x0060
105 #define DPAA2_PKT_TYPE_IPV4		0x0000
106 #define DPAA2_PKT_TYPE_IPV6		0x0020
107 #define DPAA2_PKT_TYPE_IPV4_EXT \
108 			(0x0001 | DPAA2_PKT_TYPE_IPV4)
109 #define DPAA2_PKT_TYPE_IPV6_EXT \
110 			(0x0001 | DPAA2_PKT_TYPE_IPV6)
111 #define DPAA2_PKT_TYPE_IPV4_TCP \
112 			(0x000e | DPAA2_PKT_TYPE_IPV4)
113 #define DPAA2_PKT_TYPE_IPV6_TCP \
114 			(0x000e | DPAA2_PKT_TYPE_IPV6)
115 #define DPAA2_PKT_TYPE_IPV4_UDP \
116 			(0x0010 | DPAA2_PKT_TYPE_IPV4)
117 #define DPAA2_PKT_TYPE_IPV6_UDP \
118 			(0x0010 | DPAA2_PKT_TYPE_IPV6)
119 #define DPAA2_PKT_TYPE_IPV4_SCTP	\
120 			(0x000f | DPAA2_PKT_TYPE_IPV4)
121 #define DPAA2_PKT_TYPE_IPV6_SCTP	\
122 			(0x000f | DPAA2_PKT_TYPE_IPV6)
123 #define DPAA2_PKT_TYPE_IPV4_ICMP \
124 			(0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
125 #define DPAA2_PKT_TYPE_IPV6_ICMP \
126 			(0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
127 #define DPAA2_PKT_TYPE_VLAN_1		0x0160
128 #define DPAA2_PKT_TYPE_VLAN_2		0x0260
129 
130 /* Global pool used by driver for SG list TX */
131 extern struct rte_mempool *dpaa2_tx_sg_pool;
132 /* Maximum SG segments */
133 #define DPAA2_MAX_SGS 128
134 /* SG pool size */
135 #define DPAA2_POOL_SIZE 2048
136 /* SG pool cache size */
137 #define DPAA2_POOL_CACHE_SIZE 256
/* Bookkeeping entry used to release external and indirect
 * buffers after the hardware is done with them.
 */
struct sw_buf_free {
	/* To which packet (within a burst) this segment belongs */
	uint16_t pkt_id;
	/* The actual segment to be freed */
	struct rte_mbuf *seg;
};
147 
148 /* enable timestamp in mbuf*/
149 extern bool dpaa2_enable_ts[];
150 extern uint64_t dpaa2_timestamp_rx_dynflag;
151 extern int dpaa2_timestamp_dynfield_offset;
152 
153 /* Externally defined */
154 extern const struct rte_flow_ops dpaa2_flow_ops;
155 
156 extern const struct rte_tm_ops dpaa2_tm_ops;
157 
158 extern bool dpaa2_enable_err_queue;
159 
160 extern bool dpaa2_print_parser_result;
161 
162 #define DPAA2_FAPR_SIZE \
163 	(sizeof(struct dpaa2_annot_hdr) - \
164 	offsetof(struct dpaa2_annot_hdr, word3))
165 
166 #define DPAA2_PR_NXTHDR_OFFSET 0
167 
168 #define DPAA2_FAFE_PSR_OFFSET 2
169 #define DPAA2_FAFE_PSR_SIZE 2
170 
171 #define DPAA2_FAF_PSR_OFFSET 4
172 #define DPAA2_FAF_PSR_SIZE 12
173 
174 #define DPAA2_FAF_TOTAL_SIZE \
175 	(DPAA2_FAFE_PSR_SIZE + DPAA2_FAF_PSR_SIZE)
176 
/* Just the most popular Frame attribute flags (FAF) here.
 * Bit positions 0..(DPAA2_FAFE_PSR_SIZE * 8 - 1) are the FAF extension
 * (FAFE) flags programmed by the soft parser; the standard FAF flags
 * follow, hence the "+ DPAA2_FAFE_PSR_SIZE * 8" offset below.
 */
enum dpaa2_rx_faf_offset {
	/* Flags set by the soft parser (SP) - start */
	FAFE_VXLAN_IN_VLAN_FRAM = 0,
	FAFE_VXLAN_IN_IPV4_FRAM = 1,
	FAFE_VXLAN_IN_IPV6_FRAM = 2,
	FAFE_VXLAN_IN_UDP_FRAM = 3,
	FAFE_VXLAN_IN_TCP_FRAM = 4,

	FAFE_ECPRI_FRAM = 7,
	/* Flags set by the soft parser (SP) - end */

	/* Standard FAF flags, offset past the FAFE region */
	FAF_GTP_PRIMED_FRAM = 1 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_PTP_FRAM = 3 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_VXLAN_FRAM = 4 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ETH_FRAM = 10 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_LLC_SNAP_FRAM = 18 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_VLAN_FRAM = 21 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_PPPOE_PPP_FRAM = 25 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_MPLS_FRAM = 27 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ARP_FRAM = 30 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPV4_FRAM = 34 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPV6_FRAM = 42 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IP_FRAM = 48 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IP_FRAG_FRAM = 50 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ICMP_FRAM = 57 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IGMP_FRAM = 58 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_GRE_FRAM = 65 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_UDP_FRAM = 70 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_TCP_FRAM = 72 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_FRAM = 77 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_ESP_FRAM = 78 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_IPSEC_AH_FRAM = 79 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_SCTP_FRAM = 81 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_DCCP_FRAM = 83 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_GTP_FRAM = 87 + DPAA2_FAFE_PSR_SIZE * 8,
	FAF_ESP_FRAM = 89 + DPAA2_FAFE_PSR_SIZE * 8,
};
215 
/* eCPRI message types as encoded in the FAFE area.
 * Each value ORs in (8 - FAFE_ECPRI_FRAM) (== 1, bit 0) and carries the
 * eCPRI message type number in bits 1..3.
 */
enum dpaa2_ecpri_fafe_type {
	ECPRI_FAFE_TYPE_0 = (8 - FAFE_ECPRI_FRAM),
	ECPRI_FAFE_TYPE_1 = (8 - FAFE_ECPRI_FRAM) | (1 << 1),
	ECPRI_FAFE_TYPE_2 = (8 - FAFE_ECPRI_FRAM) | (2 << 1),
	ECPRI_FAFE_TYPE_3 = (8 - FAFE_ECPRI_FRAM) | (3 << 1),
	ECPRI_FAFE_TYPE_4 = (8 - FAFE_ECPRI_FRAM) | (4 << 1),
	ECPRI_FAFE_TYPE_5 = (8 - FAFE_ECPRI_FRAM) | (5 << 1),
	ECPRI_FAFE_TYPE_6 = (8 - FAFE_ECPRI_FRAM) | (6 << 1),
	ECPRI_FAFE_TYPE_7 = (8 - FAFE_ECPRI_FRAM) | (7 << 1)
};
226 
227 #define DPAA2_PR_ETH_OFF_OFFSET 19
228 #define DPAA2_PR_TCI_OFF_OFFSET 21
229 #define DPAA2_PR_LAST_ETYPE_OFFSET 23
230 #define DPAA2_PR_L3_OFF_OFFSET 27
231 #define DPAA2_PR_L4_OFF_OFFSET 30
232 #define DPAA2_PR_L5_OFF_OFFSET 31
233 #define DPAA2_PR_NXTHDR_OFF_OFFSET 34
234 
235 /* Set by SP for vxlan distribution start*/
236 #define DPAA2_VXLAN_IN_TCI_OFFSET 16
237 
238 #define DPAA2_VXLAN_IN_DADDR0_OFFSET 20
239 #define DPAA2_VXLAN_IN_DADDR1_OFFSET 22
240 #define DPAA2_VXLAN_IN_DADDR2_OFFSET 24
241 #define DPAA2_VXLAN_IN_DADDR3_OFFSET 25
242 #define DPAA2_VXLAN_IN_DADDR4_OFFSET 26
243 #define DPAA2_VXLAN_IN_DADDR5_OFFSET 28
244 
245 #define DPAA2_VXLAN_IN_SADDR0_OFFSET 29
246 #define DPAA2_VXLAN_IN_SADDR1_OFFSET 32
247 #define DPAA2_VXLAN_IN_SADDR2_OFFSET 33
248 #define DPAA2_VXLAN_IN_SADDR3_OFFSET 35
249 #define DPAA2_VXLAN_IN_SADDR4_OFFSET 41
250 #define DPAA2_VXLAN_IN_SADDR5_OFFSET 42
251 
252 #define DPAA2_VXLAN_VNI_OFFSET 43
253 #define DPAA2_VXLAN_IN_TYPE_OFFSET 46
254 /* Set by SP for vxlan distribution end*/
255 
256 /* ECPRI shares SP context with VXLAN*/
257 #define DPAA2_ECPRI_MSG_OFFSET DPAA2_VXLAN_VNI_OFFSET
258 
259 #define DPAA2_ECPRI_MAX_EXTRACT_NB 8
260 
/* IPv4 address pair in source-then-destination order */
struct ipv4_sd_addr_extract_rule {
	uint32_t ipv4_src;	/* IPv4 source address */
	uint32_t ipv4_dst;	/* IPv4 destination address */
};
265 
/* IPv6 address pair in source-then-destination order */
struct ipv6_sd_addr_extract_rule {
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];	/* IPv6 source address */
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];	/* IPv6 destination address */
};
270 
/* IPv4 address pair in destination-then-source order */
struct ipv4_ds_addr_extract_rule {
	uint32_t ipv4_dst;	/* IPv4 destination address */
	uint32_t ipv4_src;	/* IPv4 source address */
};
275 
/* IPv6 address pair in destination-then-source order */
struct ipv6_ds_addr_extract_rule {
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];	/* IPv6 destination address */
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];	/* IPv6 source address */
};
280 
/* One IP address-pair rule, in any family/order combination */
union ip_addr_extract_rule {
	struct ipv4_sd_addr_extract_rule ipv4_sd_addr;	/* IPv4, src then dst */
	struct ipv6_sd_addr_extract_rule ipv6_sd_addr;	/* IPv6, src then dst */
	struct ipv4_ds_addr_extract_rule ipv4_ds_addr;	/* IPv4, dst then src */
	struct ipv6_ds_addr_extract_rule ipv6_ds_addr;	/* IPv6, dst then src */
};
287 
/* IP source address in either family */
union ip_src_addr_extract_rule {
	uint32_t ipv4_src;				/* IPv4 source address */
	uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];	/* IPv6 source address */
};
292 
/* IP destination address in either family */
union ip_dst_addr_extract_rule {
	uint32_t ipv4_dst;				/* IPv4 destination address */
	uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];	/* IPv6 destination address */
};
297 
/* Which IP addresses are extracted into the key, and in which order */
enum ip_addr_extract_type {
	IP_NONE_ADDR_EXTRACT,	/* No IP address extraction */
	IP_SRC_EXTRACT,		/* Source address only */
	IP_DST_EXTRACT,		/* Destination address only */
	IP_SRC_DST_EXTRACT,	/* Source followed by destination */
	IP_DST_SRC_EXTRACT	/* Destination followed by source */
};
305 
/* Source the hardware uses to extract a key field */
enum key_prot_type {
	/* HW extracts from standard protocol header fields */
	DPAA2_NET_PROT_KEY,
	/* HW extracts from the FAF area of the parse result (PR) */
	DPAA2_FAF_KEY,
	/* HW extracts from the parse result (PR), other than FAF */
	DPAA2_PR_KEY
};
314 
/* Identity of one extracted key field */
struct key_prot_field {
	enum key_prot_type type;	/* Where HW extracts the field from */
	enum net_prot prot;		/* Protocol the field belongs to */
	uint32_t key_field;		/* Field id within the protocol */
};
320 
/* Byte span used for raw (non-protocol) key extraction */
struct dpaa2_raw_region {
	uint8_t raw_start;	/* Start offset of the raw region */
	uint8_t raw_size;	/* Size of the raw region in bytes */
};
325 
/* Driver-side summary of a generated lookup key layout.
 * NOTE(review): per-field comments below are inferred from naming;
 * confirm against the flow/distribution code that fills this in.
 */
struct dpaa2_key_profile {
	uint8_t num;		/* Number of extracts in use */
	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];	/* Offset of each extract in the key */
	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];	/* Size of each extract in bytes */

	enum ip_addr_extract_type ip_addr_type;	/* IP address extract ordering */
	uint8_t ip_addr_extract_pos;	/* Extract index of the IP address field */
	uint8_t ip_addr_extract_off;	/* Key offset of the IP address field */

	uint8_t raw_extract_pos;	/* Extract index of the raw region */
	uint8_t raw_extract_off;	/* Key offset of the raw region */
	uint8_t raw_extract_num;	/* Number of raw extracts */

	uint8_t l4_src_port_present;	/* Non-zero if L4 source port is in the key */
	uint8_t l4_src_port_pos;	/* Extract index of the L4 source port */
	uint8_t l4_src_port_offset;	/* Key offset of the L4 source port */
	uint8_t l4_dst_port_present;	/* Non-zero if L4 destination port is in the key */
	uint8_t l4_dst_port_pos;	/* Extract index of the L4 destination port */
	uint8_t l4_dst_port_offset;	/* Key offset of the L4 destination port */
	struct key_prot_field prot_field[DPKG_MAX_NUM_OF_EXTRACTS];	/* Per-extract field identity */
	uint16_t key_max_size;		/* Maximum total key size in bytes */
	struct dpaa2_raw_region raw_region;	/* Raw extraction span */
};
349 
/* Pairs the HW key-generation configuration with its driver-side summary */
struct dpaa2_key_extract {
	struct dpkg_profile_cfg dpkg;		/* HW key-generation (DPKG) config */
	struct dpaa2_key_profile key_profile;	/* Resulting key layout summary */
};
354 
/* Key-extraction state for the QoS table and for each traffic class */
struct extract_s {
	struct dpaa2_key_extract qos_key_extract;	/* QoS table key extract */
	struct dpaa2_key_extract tc_key_extract[MAX_TCS];	/* Per-TC key extract */
	/* Serialized extract parameter buffers (presumably handed to MC
	 * via IOVA — TODO confirm against the flow configuration code).
	 */
	uint8_t *qos_extract_param;
	uint8_t *tc_extract_param[MAX_TCS];
};
361 
/* Per-port private data of the DPAA2 ethdev driver.
 * NOTE(review): several field comments below are inferred from naming and
 * DPAA2 conventions; confirm against dpaa2_ethdev.c where they are set.
 */
struct dpaa2_dev_priv {
	void *hw;		/* Handle for hardware (MC) access */
	int32_t hw_id;		/* Hardware object id */
	int32_t qdid;		/* Queuing destination id (presumably used on Tx) */
	uint16_t token;		/* Token obtained when opening the object */
	uint8_t nb_tx_queues;	/* Number of configured Tx queues */
	uint8_t nb_rx_queues;	/* Number of configured Rx queues */
	uint32_t options;	/* DPNI option flags */
	void *rx_vq[MAX_RX_QUEUES];	/* Rx queues */
	void *tx_vq[MAX_TX_QUEUES];	/* Tx queues */
	struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
	void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];	/* Tx confirmation queues */
	void *rx_err_vq;	/* Rx error queue */
	uint8_t flags; /* dpaa2 config flags (DPAA2_TX_CGR_OFF etc.) */
	uint8_t max_mac_filters;	/* Max. number of MAC filter entries */
	uint8_t max_vlan_filters;	/* Max. number of VLAN filter entries */
	uint8_t num_rx_tc;	/* Number of Rx traffic classes */
	uint8_t num_tx_tc;	/* Number of Tx traffic classes */
	uint16_t qos_entries;	/* Number of QoS table entries */
	uint16_t fs_entries;	/* Number of flow-steering table entries */
	uint8_t dist_queues;	/* Queues per TC used for distribution */
	uint8_t num_channels;	/* Number of channels */
	uint8_t en_ordered;	/* Ordered processing enabled */
	uint8_t en_loose_ordered;	/* Loose-ordered processing enabled */
	uint8_t max_cgs;	/* Max. number of congestion groups */
	uint8_t cgid_in_use[MAX_RX_QUEUES];	/* Congestion group id usage per Rx queue */

	enum rte_dpaa2_dev_type ep_dev_type;   /**< Endpoint Device Type */
	uint16_t ep_object_id;                 /**< Endpoint DPAA2 Object ID */
	char ep_name[RTE_DEV_NAME_MAX_LEN];	/* Endpoint device name */

	struct extract_s extract;	/* QoS/TC key-extraction state */

	uint16_t ss_offset;	/* Soft sequence offset (soft parser) */
	uint64_t ss_iova;	/* IOVA of the soft sequence image */
	uint64_t ss_param_iova;	/* IOVA of the soft sequence parameters */
	/* stores timestamp of last received packet on dev */
	uint64_t rx_timestamp;
	/* stores timestamp of last received tx confirmation packet on dev */
	uint64_t tx_timestamp;
	/* stores pointer to next tx_conf queue that should be processed,
	 * it corresponds to last packet transmitted
	 */
	struct dpaa2_queue *next_tx_conf_queue;

	struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
	rte_spinlock_t lpbk_qp_lock;	/* Protects loopback queue-pair setup */

	uint8_t channel_inuse;	/* Number of channels currently in use */
	/* Stores correction offset for one step timestamping */
	uint16_t ptp_correction_offset;

	struct dpaa2_dev_flow *curr;	/* Flow currently being configured */
	LIST_HEAD(, dpaa2_dev_flow) flows;	/* Flow rules created on this port */
	LIST_HEAD(nodes, dpaa2_tm_node) nodes;	/* Traffic-manager nodes */
	LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;	/* TM shaper profiles */
};
419 
420 int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set,
421 				      struct dpkg_profile_cfg *kg_cfg);
422 
423 int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
424 		uint64_t req_dist_set, int tc_index);
425 
426 int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
427 			   uint8_t tc_index);
428 
429 int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
430 	struct fsl_mc_io *dpni, void *blist);
431 
432 __rte_internal
433 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
434 		int eth_rx_queue_id,
435 		struct dpaa2_dpcon_dev *dpcon,
436 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
437 
438 __rte_internal
439 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
440 		int eth_rx_queue_id);
441 
442 uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
443 
444 uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
445 				uint16_t nb_pkts);
446 
447 uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
448 			       uint16_t nb_pkts);
449 void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
450 				      const struct qbman_fd *fd,
451 				      const struct qbman_result *dq,
452 				      struct dpaa2_queue *rxq,
453 				      struct rte_event *ev);
454 void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
455 				    const struct qbman_fd *fd,
456 				    const struct qbman_result *dq,
457 				    struct dpaa2_queue *rxq,
458 				    struct rte_event *ev);
459 void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
460 				     const struct qbman_fd *fd,
461 				     const struct qbman_result *dq,
462 				     struct dpaa2_queue *rxq,
463 				     struct rte_event *ev);
464 uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
465 uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
466 			      uint16_t nb_pkts);
467 __rte_internal
468 uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
469 		struct rte_mbuf **bufs, uint16_t nb_pkts);
470 
471 void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, struct dpaa2_queue *dpaa2_q);
472 void dpaa2_flow_clean(struct rte_eth_dev *dev);
473 uint16_t dpaa2_dev_tx_conf(void *queue)  __rte_unused;
474 
475 int dpaa2_timesync_enable(struct rte_eth_dev *dev);
476 int dpaa2_timesync_disable(struct rte_eth_dev *dev);
477 int dpaa2_timesync_read_time(struct rte_eth_dev *dev,
478 					struct timespec *timestamp);
479 int dpaa2_timesync_write_time(struct rte_eth_dev *dev,
480 					const struct timespec *timestamp);
481 int dpaa2_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
482 int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
483 						struct timespec *timestamp,
484 						uint32_t flags __rte_unused);
485 int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
486 					  struct timespec *timestamp);
487 
488 int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
489 int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
490 int dpaa2_soft_parser_loaded(void);
491 
492 int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
493 	uint16_t qidx, uint64_t cntx,
494 	eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
495 	struct dpaa2_queue **txq,
496 	struct dpaa2_queue **rxq);
497 
498 #endif /* _DPAA2_ETHDEV_H */
499