/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2024 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <dev_driver.h>
#include <rte_hexdump.h>

#include <bus_fslmc_driver.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
#include "dpaa2_parse_dump.h"

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;

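/* The Rx timestamp lives in an mbuf dynamic field rather than a fixed
 * struct member; this helper returns a pointer to it at the registered
 * offset. Read sketch (illustrative only, assuming the timestamp dynfield
 * and dynflag were registered at port init):
 *
 *	rte_mbuf_timestamp_t ts;
 *
 *	if (m->ol_flags & dpaa2_timestamp_rx_dynflag)
 *		ts = *dpaa2_timestamp_dynfield(m);
 */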
static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

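/* Populate a contiguous (single-buffer) frame descriptor from an mbuf:
 * program the buffer IOVA, length, buffer pool id and data offset, and
 * clear the FRC/CTRL/FLC words left over from any previous use of the FD.
 * Minimal usage sketch (illustrative only; 'bpid' must be a valid buffer
 * pool id, e.g. from mempool_to_bpid()):
 *
 *	struct qbman_fd fd;
 *
 *	memset(&fd, 0, sizeof(fd));
 *	DPAA2_MBUF_TO_CONTIG_FD(mbuf, &fd, bpid);
 */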
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0);		\
	DPAA2_RESET_FD_CTRL(_fd);		\
	DPAA2_RESET_FD_FLC(_fd);		\
} while (0)

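/* Fast Rx parser used on LX2160A: the hardware places a summarized parse
 * result in the FRC field of the frame descriptor, so the packet type can
 * be derived from a single switch. Frames the FRC summary does not cover
 * fall back to dpaa2_dev_rx_parse_slow(). The RSS hash and, when enabled,
 * the Rx timestamp are also recovered from the FD/annotation here.
 */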
static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	if (unlikely(dpaa2_print_parser_result))
		dpaa2_print_parse_result(annotation);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;

	if (dpaa2_enable_ts[m->port]) {
		*dpaa2_timestamp_dynfield(m) = annotation->word2;
		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(m));
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}

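/* Slow Rx parser: walks the hardware annotation words bit by bit to build
 * the packet type (VLAN/QinQ, ARP, MPLS, IPv4/IPv6 with options, ESP,
 * fragments and the L4 protocols) and to set the checksum and VLAN
 * offload flags. Used whenever the fast paths cannot classify a frame.
 */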
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse) annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
	}
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
				L2_MPLS_N_PRESENT))
		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
					L3_PROTO_ESP_PRESENT))
			pkt_type |= RTE_PTYPE_TUNNEL_ESP;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		  L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
					L3_PROTO_ESP_PRESENT))
			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

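/* Fast Rx parser for non-LX2 platforms: checksum flags and the timestamp
 * are taken straight from the annotation, and a handful of common L3/L4
 * combinations are matched directly against annotation word4. Frames whose
 * word3 shows any of the less common L2 conditions (the mask below), and
 * anything word4 does not match, are handed to the slow parser.
 */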
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (unlikely(dpaa2_print_parser_result))
		dpaa2_print_parse_result(annotation);

	if (dpaa2_enable_ts[mbuf->port]) {
		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(mbuf));
	}

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}

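/* Convert a scatter-gather frame descriptor into a chain of mbufs. Each
 * S/G entry is turned back into the in-line mbuf that precedes its data
 * buffer; the buffer carrying the SGT itself is released once the chain
 * has been built.
 */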
static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	void *hw_annot_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

	/* Get the scatter-gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First scatter-gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for the first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
	else
		first_seg->packet_type =
			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

	rte_mbuf_refcnt_set(first_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
			(void **)&first_seg, 1, 1);
#endif
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr  = (uint8_t *)sg_addr;
		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len  = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
				(void **)&next_seg, 1, 1);
#endif
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
			(void **)&temp, 1, 1);
#endif
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}

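/* Convert a contiguous frame descriptor back into its in-line mbuf,
 * repopulate the fields the hardware may have touched, then run the
 * platform-appropriate parser on the annotation area.
 */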
static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Need to repopulate some of the fields,
	 * as they may have changed during the last transmission.
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;
	mbuf->next = NULL;
	mbuf->hash.sched.color = DPAA2_GET_FD_DROPP(fd);
	rte_mbuf_refcnt_set(mbuf, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
			(void **)&mbuf, 1, 1);
#endif

	/* Parse the packet: parse results for LX2 are in the FRC field of
	 * the FD. For other DPAA2 platforms, parse results are placed after
	 * the private SW annotation area.
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}

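/* Build a scatter-gather frame descriptor from a (possibly multi-segment)
 * mbuf on Tx. The SGT is written either into the headroom of the first
 * segment, when there is enough room, or into a buffer allocated from
 * dpaa2_tx_sg_pool. Buffers that hardware must not free (refcnt > 1,
 * external or indirect buffers) get the invalid-BPID (IVP) mark, and
 * segments that need software freeing are queued into free_buf[].
 */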
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd,
		  struct sw_buf_free *free_buf,
		  uint32_t *free_count,
		  uint32_t pkt_id,
		  uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i, offset = 0;

#ifdef RTE_LIBRTE_IEEE1588
	/* annotation area for timestamp in first buffer */
	offset = 0x64;
#endif
	if (RTE_MBUF_DIRECT(mbuf) &&
		(mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
		+ offset))) {
		temp = mbuf;
		if (rte_mbuf_refcnt_read(temp) > 1) {
			/* If refcnt > 1, invalid bpid is set to ensure
			 * buffer is not freed by HW
			 */
			fd->simple.bpid_offset = 0;
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(temp, -1);
		} else {
			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
			rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
					(void **)&temp, 1, 0);
#endif
		}
		DPAA2_SET_FD_OFFSET(fd, offset);
	} else {
		temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
		if (temp == NULL) {
			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table");
			return -ENOMEM;
		}
		DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
			(void **)&temp, 1, 0);
#endif
	}
	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	DPAA2_RESET_FD_FLC(fd);
	/* Set the scatter-gather table and scatter-gather entries */
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Reset the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(cur_seg));
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			/* If we are using the inline SGT within the same
			 * buffer, set the FLE FMT as Frame Data Section.
			 */
			if (temp == cur_seg) {
				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
				DPAA2_SET_FLE_IVP(sge);
			} else {
				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
					/* If refcnt > 1, invalid bpid is set
					 * to ensure buffer is not freed by HW.
					 */
					DPAA2_SET_FLE_IVP(sge);
					rte_mbuf_refcnt_update(cur_seg, -1);
				} else {
					DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
					rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
						(void **)&cur_seg, 1, 0);
#endif
				}
			}
		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
			free_buf[*free_count].seg = cur_seg;
			free_buf[*free_count].pkt_id = pkt_id;
			++*free_count;
			DPAA2_SET_FLE_IVP(sge);
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			free_buf[*free_count].seg = cur_seg;
			free_buf[*free_count].pkt_id = pkt_id;
			++*free_count;
		}
		cur_seg = cur_seg->next;
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

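/* Build a contiguous frame descriptor from a single-segment mbuf on Tx.
 * Direct mbufs with refcnt == 1 are simply handed to hardware for
 * freeing; buffers hardware must not free are marked with the invalid
 * BPID (IVP) bit, and mbufs that need software freeing are queued into
 * buf_to_free[].
 */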
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd,
	       struct sw_buf_free *buf_to_free,
	       uint32_t *free_count,
	       uint32_t pkt_id,
	       uint16_t bpid) __rte_unused;

static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd,
	       struct sw_buf_free *buf_to_free,
	       uint32_t *free_count,
	       uint32_t pkt_id,
	       uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		else
			rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
				(void **)&mbuf, 1, 0);
#endif
	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
		DPAA2_SET_FD_IVP(fd);
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);

		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
	}
}

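/* Fallback Tx path: copy the packet into a freshly allocated buffer from
 * the hardware buffer pool 'bpid' and build a contiguous FD over the
 * copy. Presumably used when the original mbuf does not come from a
 * DPAA2-managed pool and so cannot be handed to hardware directly.
 */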
static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
		(void **)&m, 1, 0);
#endif
	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

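/* Drain the Rx error queue: pull up to one burst of frames from the
 * error FQ, log the FD error bits and the frame annotation status for
 * each, and hexdump the raw frame to stderr. Diagnostic only; the
 * dequeued frames are not handed up the stack.
 */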
static void
dump_err_pkts(struct dpaa2_queue *dpaa2_q)
{
	/* Function receives frames for a given device and VQ */
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	uint32_t lcore_id = rte_lcore_id();
	void *v_addr, *hw_annot_addr;
	struct dpaa2_fas *fas;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Check if the previously issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;

	pending = 1;
	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether the last pull command has expired and
		 * set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status &
				QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);
		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
		fas = hw_annot_addr;

		DPAA2_PMD_ERR("[%d] error packet on port[%d]:"
			" fd_off: %d, fd_err: %x, fas_status: %x",
			rte_lcore_id(), eth_data->port_id,
			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
			fas->status);
		rte_hexdump(stderr, "Error packet", v_addr,
			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));

		dq_storage++;
		num_rx++;
	} while (pending);

	dpaa2_q->err_pkts += num_rx;
}

/* This function assumes that the caller will keep the same value of nb_pkts
 * across calls per queue; if that is not the case, better use the
 * non-prefetch version of the rx call.
 * It will return the packets as requested in the previous call without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;

	q_storage = dpaa2_q->q_storage[rte_lcore_id()];

	if (unlikely(dpaa2_enable_err_queue))
		dump_err_pkts(priv->rx_err_vq);

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						  " QBMAN is busy (1)");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare the next pull descriptor. This gives space for the
	 * prefetching done on DQRR entries.
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previously issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		if (dpaa2_svr_family != SVR_LX2160A) {
			const struct qbman_fd *next_fd =
				qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
		}
#endif

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
			priv->rx_timestamp =
				*dpaa2_timestamp_dynfield(bufs[num_rx]);
		}
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* Issue a volatile dequeue command for the next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					  " QBMAN is busy (2)");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

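/* Eventdev Rx callback for parallel queues: fill in the rte_event from
 * the per-queue template, convert the FD to an mbuf, and consume the
 * DQRR entry immediately since no ordering state needs to be kept.
 */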
void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	qbman_swp_dqrr_consume(swp, dq);
}

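/* Eventdev Rx callback for atomic queues: unlike the parallel path, the
 * DQRR entry is not consumed here. Its index (stored as dpaa2_seqn and
 * tracked in the per-lcore DQRR-held bitmap) keeps the flow's atomic
 * context held until the event is completed downstream.
 */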
void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	dqrr_index = qbman_get_dqrr_idx(dq);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

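/* Eventdev Rx callback for ordered queues: record the ODP id and the
 * sequence number from the dequeue result in dpaa2_seqn of the mbuf, so
 * that order can be restored when the packet is enqueued for Tx, then
 * consume the DQRR entry.
 */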
void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}

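/* Non-prefetch Rx burst: issue volatile dequeue commands synchronously
 * until nb_pkts frames have been pulled or the queue runs dry, honoring
 * the caller's burst size on every call (unlike the prefetch variant
 * above). A minimal caller sketch (illustrative only; 'port' and 'queue'
 * are hypothetical identifiers):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port, queue, pkts, 32);
 */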
96720191ab3SNipun Gupta uint16_t
96820191ab3SNipun Gupta dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
96920191ab3SNipun Gupta {
97020191ab3SNipun Gupta 	/* Function receive frames for a given device and VQ */
97112d98eceSJun Yang 	struct dpaa2_queue *dpaa2_q = queue;
97220191ab3SNipun Gupta 	struct qbman_result *dq_storage;
97320191ab3SNipun Gupta 	uint32_t fqid = dpaa2_q->fqid;
97420191ab3SNipun Gupta 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
97520191ab3SNipun Gupta 	uint8_t pending, status;
97620191ab3SNipun Gupta 	struct qbman_swp *swp;
9772f41c930SNipun Gupta 	const struct qbman_fd *fd;
97820191ab3SNipun Gupta 	struct qbman_pull_desc pulldesc;
97920191ab3SNipun Gupta 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
9804690a611SNipun Gupta 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
9814690a611SNipun Gupta 
9824690a611SNipun Gupta 	if (unlikely(dpaa2_enable_err_queue))
9834690a611SNipun Gupta 		dump_err_pkts(priv->rx_err_vq);
98420191ab3SNipun Gupta 
98520191ab3SNipun Gupta 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
98620191ab3SNipun Gupta 		ret = dpaa2_affine_qbman_swp();
98720191ab3SNipun Gupta 		if (ret) {
988d527f5d9SNipun Gupta 			DPAA2_PMD_ERR(
989f665790aSDavid Marchand 				"Failed to allocate IO portal, tid: %d",
990d527f5d9SNipun Gupta 				rte_gettid());
99120191ab3SNipun Gupta 			return 0;
99220191ab3SNipun Gupta 		}
99320191ab3SNipun Gupta 	}
99420191ab3SNipun Gupta 	swp = DPAA2_PER_LCORE_PORTAL;
99520191ab3SNipun Gupta 
99620191ab3SNipun Gupta 	do {
99712d98eceSJun Yang 		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
99820191ab3SNipun Gupta 		qbman_pull_desc_clear(&pulldesc);
99920191ab3SNipun Gupta 		qbman_pull_desc_set_fq(&pulldesc, fqid);
100020191ab3SNipun Gupta 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
100120191ab3SNipun Gupta 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
100220191ab3SNipun Gupta 
100320191ab3SNipun Gupta 		if (next_pull > dpaa2_dqrr_size) {
100420191ab3SNipun Gupta 			qbman_pull_desc_set_numframes(&pulldesc,
100520191ab3SNipun Gupta 				dpaa2_dqrr_size);
100620191ab3SNipun Gupta 			next_pull -= dpaa2_dqrr_size;
100720191ab3SNipun Gupta 		} else {
100820191ab3SNipun Gupta 			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
100920191ab3SNipun Gupta 			next_pull = 0;
101020191ab3SNipun Gupta 		}
101120191ab3SNipun Gupta 
101220191ab3SNipun Gupta 		while (1) {
101320191ab3SNipun Gupta 			if (qbman_swp_pull(swp, &pulldesc)) {
101420191ab3SNipun Gupta 				DPAA2_PMD_DP_DEBUG(
10152b843cacSDavid Marchand 					"VDQ command is not issued.QBMAN is busy");
101620191ab3SNipun Gupta 				/* Portal was busy, try again */
101720191ab3SNipun Gupta 				continue;
101820191ab3SNipun Gupta 			}
101920191ab3SNipun Gupta 			break;
102020191ab3SNipun Gupta 		}
102120191ab3SNipun Gupta 
102220191ab3SNipun Gupta 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
102320191ab3SNipun Gupta 		/* Check if the previously issued command is completed. */
102420191ab3SNipun Gupta 		while (!qbman_check_command_complete(dq_storage))
102520191ab3SNipun Gupta 			;
102620191ab3SNipun Gupta 
102720191ab3SNipun Gupta 		num_pulled = 0;
102820191ab3SNipun Gupta 		pending = 1;
102920191ab3SNipun Gupta 		do {
103020191ab3SNipun Gupta 			/* Loop until the dq_storage is updated with
103120191ab3SNipun Gupta 			 * new token by QBMAN
103220191ab3SNipun Gupta 			 */
103320191ab3SNipun Gupta 			while (!qbman_check_new_result(dq_storage))
103420191ab3SNipun Gupta 				;
103520191ab3SNipun Gupta 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
103620191ab3SNipun Gupta 			/* Check whether the last pull command has expired
103720191ab3SNipun Gupta 			 * and set the condition for loop termination
103820191ab3SNipun Gupta 			 */
103920191ab3SNipun Gupta 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
104020191ab3SNipun Gupta 				pending = 0;
104120191ab3SNipun Gupta 				/* Check for valid frame. */
104220191ab3SNipun Gupta 				status = qbman_result_DQ_flags(dq_storage);
104320191ab3SNipun Gupta 				if (unlikely((status &
104420191ab3SNipun Gupta 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
104520191ab3SNipun Gupta 					continue;
104620191ab3SNipun Gupta 			}
104720191ab3SNipun Gupta 			fd = qbman_result_DQ_fd(dq_storage);
104820191ab3SNipun Gupta 
10492f41c930SNipun Gupta #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
10502f41c930SNipun Gupta 			if (dpaa2_svr_family != SVR_LX2160A) {
10512f41c930SNipun Gupta 				const struct qbman_fd *next_fd =
10522f41c930SNipun Gupta 					qbman_result_DQ_fd(dq_storage + 1);
10532f41c930SNipun Gupta 
10542f41c930SNipun Gupta 				/* Prefetch Annotation address for the parse
10552f41c930SNipun Gupta 				 * results.
10562f41c930SNipun Gupta 				 */
10572f41c930SNipun Gupta 				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
10582f41c930SNipun Gupta 					DPAA2_GET_FD_ADDR(next_fd) +
10592f41c930SNipun Gupta 					DPAA2_FD_PTA_SIZE + 16)));
10602f41c930SNipun Gupta 			}
10612f41c930SNipun Gupta #endif
106220191ab3SNipun Gupta 
106320191ab3SNipun Gupta 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
1064005d943eSNipun Gupta 				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
1065005d943eSNipun Gupta 							eth_data->port_id);
106620191ab3SNipun Gupta 			else
1067005d943eSNipun Gupta 				bufs[num_rx] = eth_fd_to_mbuf(fd,
1068005d943eSNipun Gupta 							eth_data->port_id);
106920191ab3SNipun Gupta 
107090762e5cSVanshika Shukla #if defined(RTE_LIBRTE_IEEE1588)
107102548404SGagandeep Singh 			if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
107290762e5cSVanshika Shukla 				priv->rx_timestamp =
107390762e5cSVanshika Shukla 					*dpaa2_timestamp_dynfield(bufs[num_rx]);
107490762e5cSVanshika Shukla 			}
107590762e5cSVanshika Shukla #endif
107690762e5cSVanshika Shukla 
107720191ab3SNipun Gupta 			if (eth_data->dev_conf.rxmode.offloads &
1078295968d1SFerruh Yigit 					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
107920191ab3SNipun Gupta 				rte_vlan_strip(bufs[num_rx]);
108020191ab3SNipun Gupta 			}
108120191ab3SNipun Gupta 
108220191ab3SNipun Gupta 			dq_storage++;
108320191ab3SNipun Gupta 			num_rx++;
108420191ab3SNipun Gupta 			num_pulled++;
108520191ab3SNipun Gupta 		} while (pending);
108620191ab3SNipun Gupta 	/* Last VDQ provided all packets and more packets are requested */
108720191ab3SNipun Gupta 	} while (next_pull && num_pulled == dpaa2_dqrr_size);
108820191ab3SNipun Gupta 
108920191ab3SNipun Gupta 	dpaa2_q->rx_pkts += num_rx;
109020191ab3SNipun Gupta 
109120191ab3SNipun Gupta 	return num_rx;
109220191ab3SNipun Gupta }
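
/*
 * Illustrative sketch (editorial addition, not part of the driver): once
 * dpaa2_dev_rx is installed as the ethdev rx_pkt_burst callback, an
 * application polls it through the generic burst API. port_id, queue_id
 * and the burst size of 32 are assumptions for the example.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, n;
 *
 *	n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	for (i = 0; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */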
109320191ab3SNipun Gupta 
10949ceacab7SPriyanka Jain uint16_t dpaa2_dev_tx_conf(void *queue)
10959ceacab7SPriyanka Jain {
10969ceacab7SPriyanka Jain 	/* Function to process TX confirmation frames for a given device and VQ */
10979ceacab7SPriyanka Jain 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
10989ceacab7SPriyanka Jain 	struct qbman_result *dq_storage;
10999ceacab7SPriyanka Jain 	uint32_t fqid = dpaa2_q->fqid;
11009ceacab7SPriyanka Jain 	int ret, num_tx_conf = 0, num_pulled;
11019ceacab7SPriyanka Jain 	uint8_t pending, status;
11029ceacab7SPriyanka Jain 	struct qbman_swp *swp;
11039ceacab7SPriyanka Jain 	const struct qbman_fd *fd, *next_fd;
11049ceacab7SPriyanka Jain 	struct qbman_pull_desc pulldesc;
11059ceacab7SPriyanka Jain 	struct qbman_release_desc releasedesc;
11069ceacab7SPriyanka Jain 	uint32_t bpid;
11079ceacab7SPriyanka Jain 	uint64_t buf;
1108e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588)
1109e806bf87SPriyanka Jain 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1110e806bf87SPriyanka Jain 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1111e806bf87SPriyanka Jain 	struct dpaa2_annot_hdr *annotation;
111290762e5cSVanshika Shukla 	void *v_addr;
111390762e5cSVanshika Shukla 	struct rte_mbuf *mbuf;
1114e806bf87SPriyanka Jain #endif
11159ceacab7SPriyanka Jain 
11169ceacab7SPriyanka Jain 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
11179ceacab7SPriyanka Jain 		ret = dpaa2_affine_qbman_swp();
11189ceacab7SPriyanka Jain 		if (ret) {
1119d527f5d9SNipun Gupta 			DPAA2_PMD_ERR(
1120f665790aSDavid Marchand 				"Failed to allocate IO portal, tid: %d",
1121d527f5d9SNipun Gupta 				rte_gettid());
11229ceacab7SPriyanka Jain 			return 0;
11239ceacab7SPriyanka Jain 		}
11249ceacab7SPriyanka Jain 	}
11259ceacab7SPriyanka Jain 	swp = DPAA2_PER_LCORE_PORTAL;
11269ceacab7SPriyanka Jain 
11279ceacab7SPriyanka Jain 	do {
112812d98eceSJun Yang 		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
11299ceacab7SPriyanka Jain 		qbman_pull_desc_clear(&pulldesc);
11309ceacab7SPriyanka Jain 		qbman_pull_desc_set_fq(&pulldesc, fqid);
11319ceacab7SPriyanka Jain 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
11329ceacab7SPriyanka Jain 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
11339ceacab7SPriyanka Jain 
11349ceacab7SPriyanka Jain 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
11359ceacab7SPriyanka Jain 
11369ceacab7SPriyanka Jain 		while (1) {
11379ceacab7SPriyanka Jain 			if (qbman_swp_pull(swp, &pulldesc)) {
11389ceacab7SPriyanka Jain 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
11392b843cacSDavid Marchand 						   " QBMAN is busy");
11409ceacab7SPriyanka Jain 				/* Portal was busy, try again */
11419ceacab7SPriyanka Jain 				continue;
11429ceacab7SPriyanka Jain 			}
11439ceacab7SPriyanka Jain 			break;
11449ceacab7SPriyanka Jain 		}
11459ceacab7SPriyanka Jain 
11469ceacab7SPriyanka Jain 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
11479ceacab7SPriyanka Jain 		/* Check if the previously issued command is completed. */
11489ceacab7SPriyanka Jain 		while (!qbman_check_command_complete(dq_storage))
11499ceacab7SPriyanka Jain 			;
11509ceacab7SPriyanka Jain 
11519ceacab7SPriyanka Jain 		num_pulled = 0;
11529ceacab7SPriyanka Jain 		pending = 1;
11539ceacab7SPriyanka Jain 		do {
11549ceacab7SPriyanka Jain 			/* Loop until the dq_storage is updated with
11559ceacab7SPriyanka Jain 			 * new token by QBMAN
11569ceacab7SPriyanka Jain 			 */
11579ceacab7SPriyanka Jain 			while (!qbman_check_new_result(dq_storage))
11589ceacab7SPriyanka Jain 				;
11599ceacab7SPriyanka Jain 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
11609ceacab7SPriyanka Jain 			/* Check whether the last pull command has expired
11619ceacab7SPriyanka Jain 			 * and set the condition for loop termination
11629ceacab7SPriyanka Jain 			 */
11639ceacab7SPriyanka Jain 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
11649ceacab7SPriyanka Jain 				pending = 0;
11659ceacab7SPriyanka Jain 				/* Check for valid frame. */
11669ceacab7SPriyanka Jain 				status = qbman_result_DQ_flags(dq_storage);
11679ceacab7SPriyanka Jain 				if (unlikely((status &
11689ceacab7SPriyanka Jain 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
11699ceacab7SPriyanka Jain 					continue;
11709ceacab7SPriyanka Jain 			}
11719ceacab7SPriyanka Jain 			fd = qbman_result_DQ_fd(dq_storage);
11729ceacab7SPriyanka Jain 
11739ceacab7SPriyanka Jain 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
11749ceacab7SPriyanka Jain 			/* Prefetch Annotation address for the parse results */
11759ceacab7SPriyanka Jain 			rte_prefetch0((void *)(size_t)
11769ceacab7SPriyanka Jain 				(DPAA2_GET_FD_ADDR(next_fd) +
11779ceacab7SPriyanka Jain 				 DPAA2_FD_PTA_SIZE + 16));
11789ceacab7SPriyanka Jain 
11799ceacab7SPriyanka Jain 			bpid = DPAA2_GET_FD_BPID(fd);
11809ceacab7SPriyanka Jain 
11819ceacab7SPriyanka Jain 			/* Create a release descriptor required for releasing
11829ceacab7SPriyanka Jain 			 * buffers into QBMAN
11839ceacab7SPriyanka Jain 			 */
11849ceacab7SPriyanka Jain 			qbman_release_desc_clear(&releasedesc);
11859ceacab7SPriyanka Jain 			qbman_release_desc_set_bpid(&releasedesc, bpid);
11869ceacab7SPriyanka Jain 
11879ceacab7SPriyanka Jain 			buf = DPAA2_GET_FD_ADDR(fd);
11889ceacab7SPriyanka Jain 			/* release the buffer back to the BMAN pool */
11899ceacab7SPriyanka Jain 			do {
11909ceacab7SPriyanka Jain 				ret = qbman_swp_release(swp, &releasedesc,
11919ceacab7SPriyanka Jain 							&buf, 1);
11929ceacab7SPriyanka Jain 			} while (ret == -EBUSY);
11939ceacab7SPriyanka Jain 
11949ceacab7SPriyanka Jain 			dq_storage++;
11959ceacab7SPriyanka Jain 			num_tx_conf++;
11969ceacab7SPriyanka Jain 			num_pulled++;
1197e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588)
119890762e5cSVanshika Shukla 			v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
119990762e5cSVanshika Shukla 			mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
120090762e5cSVanshika Shukla 				rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
120190762e5cSVanshika Shukla 
120202548404SGagandeep Singh 			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1203e806bf87SPriyanka Jain 				annotation = (struct dpaa2_annot_hdr *)((size_t)
1204e806bf87SPriyanka Jain 					DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1205e806bf87SPriyanka Jain 					DPAA2_FD_PTA_SIZE);
1206e806bf87SPriyanka Jain 				priv->tx_timestamp = annotation->word2;
120790762e5cSVanshika Shukla 			}
1208e806bf87SPriyanka Jain #endif
12099ceacab7SPriyanka Jain 		} while (pending);
12109ceacab7SPriyanka Jain 
12119ceacab7SPriyanka Jain 	/* Last VDQ provided all packets and more packets are requested */
12129ceacab7SPriyanka Jain 	} while (num_pulled == dpaa2_dqrr_size);
12139ceacab7SPriyanka Jain 
12149ceacab7SPriyanka Jain 	dpaa2_q->rx_pkts += num_tx_conf;
12159ceacab7SPriyanka Jain 
12169ceacab7SPriyanka Jain 	return num_tx_conf;
12179ceacab7SPriyanka Jain }
12189ceacab7SPriyanka Jain 
1219e806bf87SPriyanka Jain /* Configure the egress frame annotation for timestamp update */
1220e806bf87SPriyanka Jain static void enable_tx_tstamp(struct qbman_fd *fd)
1221e806bf87SPriyanka Jain {
1222e806bf87SPriyanka Jain 	struct dpaa2_faead *fd_faead;
1223e806bf87SPriyanka Jain 
1224e806bf87SPriyanka Jain 	/* Set frame annotation status field as valid */
1225e806bf87SPriyanka Jain 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1226e806bf87SPriyanka Jain 
1227e806bf87SPriyanka Jain 	/* Set frame annotation egress action descriptor as valid */
1228e806bf87SPriyanka Jain 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1229e806bf87SPriyanka Jain 
1230e806bf87SPriyanka Jain 	/* Set Annotation Length as 128B */
1231e806bf87SPriyanka Jain 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1232e806bf87SPriyanka Jain 
1233e806bf87SPriyanka Jain 	/* enable update of confirmation frame annotation */
1234e806bf87SPriyanka Jain 	fd_faead = (struct dpaa2_faead *)((size_t)
1235e806bf87SPriyanka Jain 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1236e806bf87SPriyanka Jain 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1237e806bf87SPriyanka Jain 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1238e806bf87SPriyanka Jain 				DPAA2_ANNOT_FAEAD_UPD;
1239e806bf87SPriyanka Jain }
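
/*
 * Illustrative sketch (editorial addition): when the PMD is built with
 * RTE_LIBRTE_IEEE1588, an application requests a hardware TX timestamp by
 * flagging the mbuf before transmission; enable_tx_tstamp() above then arms
 * the frame annotation, and the stamp is read back on the TX confirmation
 * path (dpaa2_dev_tx_conf). port_id and queue_id are assumptions for the
 * example.
 *
 *	m->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
 *	rte_eth_tx_burst(port_id, queue_id, &m, 1);
 */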
1240e806bf87SPriyanka Jain 
1241cd9935ceSHemant Agrawal /*
1242cd9935ceSHemant Agrawal  * Callback to handle sending packets through WRIOP based interface
1243cd9935ceSHemant Agrawal  */
1244cd9935ceSHemant Agrawal uint16_t
1245cd9935ceSHemant Agrawal dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1246cd9935ceSHemant Agrawal {
1247cd9935ceSHemant Agrawal 	/* Function to transmit the frames to the given device and VQ */
1248a0840963SHemant Agrawal 	uint32_t loop, retry_count;
1249cd9935ceSHemant Agrawal 	int32_t ret;
1250cd9935ceSHemant Agrawal 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1251774e9ea9SHemant Agrawal 	struct rte_mbuf *mi;
1252cd9935ceSHemant Agrawal 	uint32_t frames_to_send;
1253cd9935ceSHemant Agrawal 	struct rte_mempool *mp;
1254cd9935ceSHemant Agrawal 	struct qbman_eq_desc eqdesc;
1255cd9935ceSHemant Agrawal 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1256cd9935ceSHemant Agrawal 	struct qbman_swp *swp;
1257cd9935ceSHemant Agrawal 	uint16_t num_tx = 0;
1258cd9935ceSHemant Agrawal 	uint16_t bpid;
125985ee5ddaSShreyansh Jain 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
126085ee5ddaSShreyansh Jain 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
12612d378863SNipun Gupta 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1262b0074a7bSGagandeep Singh 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1263b0074a7bSGagandeep Singh 	uint32_t free_count = 0;
1264cd9935ceSHemant Agrawal 
1265cd9935ceSHemant Agrawal 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1266cd9935ceSHemant Agrawal 		ret = dpaa2_affine_qbman_swp();
1267cd9935ceSHemant Agrawal 		if (ret) {
1268d527f5d9SNipun Gupta 			DPAA2_PMD_ERR(
1269f665790aSDavid Marchand 				"Failed to allocate IO portal, tid: %d",
1270d527f5d9SNipun Gupta 				rte_gettid());
1271cd9935ceSHemant Agrawal 			return 0;
1272cd9935ceSHemant Agrawal 		}
1273cd9935ceSHemant Agrawal 	}
1274cd9935ceSHemant Agrawal 	swp = DPAA2_PER_LCORE_PORTAL;
1275cd9935ceSHemant Agrawal 
12762b843cacSDavid Marchand 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
127785ee5ddaSShreyansh Jain 			eth_data, dpaa2_q->fqid);
1278cd9935ceSHemant Agrawal 
1279e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588
1280e806bf87SPriyanka Jain 	/* The IEEE1588 driver needs a pointer to the TX confirmation
1281e806bf87SPriyanka Jain 	 * queue corresponding to the last transmitted packet in order
1282e806bf87SPriyanka Jain 	 * to read the timestamp
1283e806bf87SPriyanka Jain 	 */
128402548404SGagandeep Singh 	if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1285e806bf87SPriyanka Jain 		priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1286e806bf87SPriyanka Jain 		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
128790762e5cSVanshika Shukla 		priv->tx_timestamp = 0;
128890762e5cSVanshika Shukla 	}
1289e806bf87SPriyanka Jain #endif
1290e806bf87SPriyanka Jain 
1291cd9935ceSHemant Agrawal 	/*Prepare enqueue descriptor*/
1292cd9935ceSHemant Agrawal 	qbman_eq_desc_clear(&eqdesc);
1293cd9935ceSHemant Agrawal 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1294e26bf82eSSachin Saxena 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1295e26bf82eSSachin Saxena 
1296cd9935ceSHemant Agrawal 	/*Clear the unused FD fields before sending*/
1297cd9935ceSHemant Agrawal 	while (nb_pkts) {
12987ae777d0SHemant Agrawal 		/*Check if the queue is congested*/
1299a0840963SHemant Agrawal 		retry_count = 0;
130069293c77SHemant Agrawal 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1301a0840963SHemant Agrawal 			retry_count++;
1302a0840963SHemant Agrawal 			/* Retry for some time before giving up */
1303c3ffe74dSGagandeep Singh 			if (retry_count > CONG_RETRY_COUNT) {
1304c3ffe74dSGagandeep Singh 				if (dpaa2_q->tm_sw_td)
1305c3ffe74dSGagandeep Singh 					goto sw_td;
13067ae777d0SHemant Agrawal 				goto skip_tx;
1307a0840963SHemant Agrawal 			}
1308c3ffe74dSGagandeep Singh 		}
13097ae777d0SHemant Agrawal 
1310bd23b1a8SNipun Gupta 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1311bd23b1a8SNipun Gupta 			dpaa2_eqcr_size : nb_pkts;
1312cd9935ceSHemant Agrawal 
1313cd9935ceSHemant Agrawal 		for (loop = 0; loop < frames_to_send; loop++) {
1314ea278063SDavid Marchand 			if (*dpaa2_seqn(*bufs)) {
1315ea278063SDavid Marchand 				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
13162d378863SNipun Gupta 
13172d378863SNipun Gupta 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
13182d378863SNipun Gupta 						dqrr_index;
13192d378863SNipun Gupta 				DPAA2_PER_LCORE_DQRR_SIZE--;
13202d378863SNipun Gupta 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1321ea278063SDavid Marchand 				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
13222d378863SNipun Gupta 			}
13232d378863SNipun Gupta 
132448e7f156SNipun Gupta 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1325cd9935ceSHemant Agrawal 				mp = (*bufs)->pool;
132648e7f156SNipun Gupta 				/* Check the basic scenario and set
132748e7f156SNipun Gupta 				 * the FD appropriately here itself.
132848e7f156SNipun Gupta 				 */
132948e7f156SNipun Gupta 				if (likely(mp && mp->ops_index ==
133048e7f156SNipun Gupta 				    priv->bp_list->dpaa2_ops_index &&
133148e7f156SNipun Gupta 				    (*bufs)->nb_segs == 1 &&
133248e7f156SNipun Gupta 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
13330ebce612SSunil Kumar Kori 					if (unlikely(((*bufs)->ol_flags
1334daa02b5cSOlivier Matz 						& RTE_MBUF_F_TX_VLAN) ||
133585ee5ddaSShreyansh Jain 						(eth_data->dev_conf.txmode.offloads
1336295968d1SFerruh Yigit 						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
133748e7f156SNipun Gupta 						ret = rte_vlan_insert(bufs);
133848e7f156SNipun Gupta 						if (ret)
133948e7f156SNipun Gupta 							goto send_n_return;
134048e7f156SNipun Gupta 					}
134148e7f156SNipun Gupta 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
134248e7f156SNipun Gupta 					&fd_arr[loop], mempool_to_bpid(mp));
13433fa54e3dSGagandeep Singh #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
13443fa54e3dSGagandeep Singh 					rte_mempool_check_cookies
13453fa54e3dSGagandeep Singh 						(rte_mempool_from_obj((void *)*bufs),
13463fa54e3dSGagandeep Singh 						(void **)bufs, 1, 0);
13473fa54e3dSGagandeep Singh #endif
134848e7f156SNipun Gupta 					bufs++;
1349e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588
1350e806bf87SPriyanka Jain 					enable_tx_tstamp(&fd_arr[loop]);
1351e806bf87SPriyanka Jain #endif
135248e7f156SNipun Gupta 					continue;
135348e7f156SNipun Gupta 				}
1354774e9ea9SHemant Agrawal 			} else {
1355774e9ea9SHemant Agrawal 				mi = rte_mbuf_from_indirect(*bufs);
1356774e9ea9SHemant Agrawal 				mp = mi->pool;
1357774e9ea9SHemant Agrawal 			}
13586bfbafe1SNipun Gupta 
13596bfbafe1SNipun Gupta 			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
13606bfbafe1SNipun Gupta 				if (unlikely((*bufs)->nb_segs > 1)) {
136175e2a1d4SGagandeep Singh 					mp = (*bufs)->pool;
13626bfbafe1SNipun Gupta 					if (eth_mbuf_to_sg_fd(*bufs,
13636bfbafe1SNipun Gupta 							      &fd_arr[loop],
1364b0074a7bSGagandeep Singh 							      buf_to_free,
1365b0074a7bSGagandeep Singh 							      &free_count,
1366b0074a7bSGagandeep Singh 							      loop,
136775e2a1d4SGagandeep Singh 							      mempool_to_bpid(mp)))
13686bfbafe1SNipun Gupta 						goto send_n_return;
13696bfbafe1SNipun Gupta 				} else {
13706bfbafe1SNipun Gupta 					eth_mbuf_to_fd(*bufs,
1371b0074a7bSGagandeep Singh 							&fd_arr[loop],
1372b0074a7bSGagandeep Singh 							buf_to_free,
1373b0074a7bSGagandeep Singh 							&free_count,
1374b0074a7bSGagandeep Singh 							loop, 0);
13756bfbafe1SNipun Gupta 				}
13766bfbafe1SNipun Gupta 				bufs++;
13776bfbafe1SNipun Gupta #ifdef RTE_LIBRTE_IEEE1588
13786bfbafe1SNipun Gupta 				enable_tx_tstamp(&fd_arr[loop]);
13796bfbafe1SNipun Gupta #endif
13806bfbafe1SNipun Gupta 				continue;
13816bfbafe1SNipun Gupta 			}
13826bfbafe1SNipun Gupta 
13839e5f3e6dSHemant Agrawal 			/* Not a hw_pkt pool allocated frame */
1384790ec226SHemant Agrawal 			if (unlikely(!mp || !priv->bp_list)) {
1385a10a988aSShreyansh Jain 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1386790ec226SHemant Agrawal 				goto send_n_return;
1387774e9ea9SHemant Agrawal 			}
1388790ec226SHemant Agrawal 
1389daa02b5cSOlivier Matz 			if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
139085ee5ddaSShreyansh Jain 				(eth_data->dev_conf.txmode.offloads
1391295968d1SFerruh Yigit 				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
13920ebce612SSunil Kumar Kori 				int ret = rte_vlan_insert(bufs);
13930ebce612SSunil Kumar Kori 				if (ret)
13940ebce612SSunil Kumar Kori 					goto send_n_return;
13950ebce612SSunil Kumar Kori 			}
13969e5f3e6dSHemant Agrawal 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1397a10a988aSShreyansh Jain 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
13989e5f3e6dSHemant Agrawal 				/* alloc should be from the default buffer pool
13999e5f3e6dSHemant Agrawal 				 * attached to this interface
14009e5f3e6dSHemant Agrawal 				 */
14019e5f3e6dSHemant Agrawal 				bpid = priv->bp_list->buf_pool.bpid;
1402790ec226SHemant Agrawal 
1403774e9ea9SHemant Agrawal 				if (unlikely((*bufs)->nb_segs > 1)) {
1404a10a988aSShreyansh Jain 					DPAA2_PMD_ERR("S/G support not added"
1405774e9ea9SHemant Agrawal 						" for non hw offload buffer");
1406790ec226SHemant Agrawal 					goto send_n_return;
1407774e9ea9SHemant Agrawal 				}
14089e5f3e6dSHemant Agrawal 				if (eth_copy_mbuf_to_fd(*bufs,
14099e5f3e6dSHemant Agrawal 							&fd_arr[loop], bpid)) {
1410790ec226SHemant Agrawal 					goto send_n_return;
14119e5f3e6dSHemant Agrawal 				}
1412790ec226SHemant Agrawal 				/* free the original packet */
1413790ec226SHemant Agrawal 				rte_pktmbuf_free(*bufs);
14149e5f3e6dSHemant Agrawal 			} else {
1415cd9935ceSHemant Agrawal 				bpid = mempool_to_bpid(mp);
1416774e9ea9SHemant Agrawal 				if (unlikely((*bufs)->nb_segs > 1)) {
1417774e9ea9SHemant Agrawal 					if (eth_mbuf_to_sg_fd(*bufs,
1418cc8569f0SHemant Agrawal 							&fd_arr[loop],
1419b0074a7bSGagandeep Singh 							buf_to_free,
1420b0074a7bSGagandeep Singh 							&free_count,
1421b0074a7bSGagandeep Singh 							loop,
142275e2a1d4SGagandeep Singh 							bpid))
1423790ec226SHemant Agrawal 						goto send_n_return;
1424774e9ea9SHemant Agrawal 				} else {
1425774e9ea9SHemant Agrawal 					eth_mbuf_to_fd(*bufs,
1426b0074a7bSGagandeep Singh 							&fd_arr[loop],
1427b0074a7bSGagandeep Singh 							buf_to_free,
1428b0074a7bSGagandeep Singh 							&free_count,
1429b0074a7bSGagandeep Singh 							loop, bpid);
1430774e9ea9SHemant Agrawal 				}
14319e5f3e6dSHemant Agrawal 			}
1432e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588
1433e806bf87SPriyanka Jain 			enable_tx_tstamp(&fd_arr[loop]);
1434e806bf87SPriyanka Jain #endif
1435cd9935ceSHemant Agrawal 			bufs++;
1436cd9935ceSHemant Agrawal 		}
1437ce4fd609SNipun Gupta 
1438cd9935ceSHemant Agrawal 		loop = 0;
1439ce4fd609SNipun Gupta 		retry_count = 0;
1440cd9935ceSHemant Agrawal 		while (loop < frames_to_send) {
1441ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
14422d378863SNipun Gupta 					&fd_arr[loop], &flags[loop],
1443496324d2SNipun Gupta 					frames_to_send - loop);
1444ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1445ce4fd609SNipun Gupta 				retry_count++;
1446ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1447ce4fd609SNipun Gupta 					num_tx += loop;
1448ce4fd609SNipun Gupta 					nb_pkts -= loop;
1449ce4fd609SNipun Gupta 					goto send_n_return;
1450ce4fd609SNipun Gupta 				}
1451ce4fd609SNipun Gupta 			} else {
1452ce4fd609SNipun Gupta 				loop += ret;
1453ce4fd609SNipun Gupta 				retry_count = 0;
1454ce4fd609SNipun Gupta 			}
1455cd9935ceSHemant Agrawal 		}
1456cd9935ceSHemant Agrawal 
1457ce4fd609SNipun Gupta 		num_tx += loop;
1458ce4fd609SNipun Gupta 		nb_pkts -= loop;
1459cd9935ceSHemant Agrawal 	}
146048e7f156SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
14616bfbafe1SNipun Gupta 
1462b0074a7bSGagandeep Singh 	for (loop = 0; loop < free_count; loop++) {
1463b0074a7bSGagandeep Singh 		if (buf_to_free[loop].pkt_id < num_tx)
1464b0074a7bSGagandeep Singh 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
14656bfbafe1SNipun Gupta 	}
14666bfbafe1SNipun Gupta 
1467790ec226SHemant Agrawal 	return num_tx;
1468790ec226SHemant Agrawal 
1469790ec226SHemant Agrawal send_n_return:
1470790ec226SHemant Agrawal 	/* send any already prepared fd */
1471790ec226SHemant Agrawal 	if (loop) {
1472790ec226SHemant Agrawal 		unsigned int i = 0;
1473790ec226SHemant Agrawal 
1474ce4fd609SNipun Gupta 		retry_count = 0;
1475790ec226SHemant Agrawal 		while (i < loop) {
1476ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
14772d378863SNipun Gupta 							 &fd_arr[i],
1478ce4fd609SNipun Gupta 							 &flags[i],
1479496324d2SNipun Gupta 							 loop - i);
1480ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1481ce4fd609SNipun Gupta 				retry_count++;
1482ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1483ce4fd609SNipun Gupta 					break;
1484ce4fd609SNipun Gupta 			} else {
1485ce4fd609SNipun Gupta 				i += ret;
1486ce4fd609SNipun Gupta 				retry_count = 0;
1487790ec226SHemant Agrawal 			}
1488ce4fd609SNipun Gupta 		}
1489ce4fd609SNipun Gupta 		num_tx += i;
1490790ec226SHemant Agrawal 	}
14919e5f3e6dSHemant Agrawal skip_tx:
149248e7f156SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
14936bfbafe1SNipun Gupta 
1494b0074a7bSGagandeep Singh 	for (loop = 0; loop < free_count; loop++) {
1495b0074a7bSGagandeep Singh 		if (buf_to_free[loop].pkt_id < num_tx)
1496b0074a7bSGagandeep Singh 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
14976bfbafe1SNipun Gupta 	}
14986bfbafe1SNipun Gupta 
1499cd9935ceSHemant Agrawal 	return num_tx;
1500c3ffe74dSGagandeep Singh sw_td:
1501c3ffe74dSGagandeep Singh 	loop = 0;
1502c3ffe74dSGagandeep Singh 	while (loop < num_tx) {
1503c3ffe74dSGagandeep Singh 		if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs)))
1504c3ffe74dSGagandeep Singh 			rte_pktmbuf_free(*bufs);
1505c3ffe74dSGagandeep Singh 		bufs++;
1506c3ffe74dSGagandeep Singh 		loop++;
1507c3ffe74dSGagandeep Singh 	}
1508c3ffe74dSGagandeep Singh 
1509c3ffe74dSGagandeep Singh 	/* free the pending buffers */
1510c3ffe74dSGagandeep Singh 	while (nb_pkts) {
1511c3ffe74dSGagandeep Singh 		rte_pktmbuf_free(*bufs);
1512c3ffe74dSGagandeep Singh 		bufs++;
1513c3ffe74dSGagandeep Singh 		nb_pkts--;
1514c3ffe74dSGagandeep Singh 		num_tx++;
1515c3ffe74dSGagandeep Singh 	}
1516c3ffe74dSGagandeep Singh 	dpaa2_q->tx_pkts += num_tx;
1517c3ffe74dSGagandeep Singh 
1518c3ffe74dSGagandeep Singh 	return num_tx;
1519cd9935ceSHemant Agrawal }
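
/*
 * Illustrative sketch (editorial addition): dpaa2_dev_tx may accept fewer
 * frames than requested (queue congestion or EQCR back-pressure), so a
 * caller of the generic burst API typically retries on the unsent tail.
 * port_id, queue_id, pkts and n_pkts are assumptions for the example.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < n_pkts)
 *		sent += rte_eth_tx_burst(port_id, queue_id,
 *					 &pkts[sent], n_pkts - sent);
 */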
1520a1f3a12cSHemant Agrawal 
152116c4a3c4SNipun Gupta void
152295af364bSGagandeep Singh dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
152395af364bSGagandeep Singh 			  __rte_unused struct dpaa2_queue *dpaa2_q)
152416c4a3c4SNipun Gupta {
152516c4a3c4SNipun Gupta 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
152616c4a3c4SNipun Gupta 	struct qbman_fd *fd;
152716c4a3c4SNipun Gupta 	struct rte_mbuf *m;
152816c4a3c4SNipun Gupta 
152916c4a3c4SNipun Gupta 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1530005d943eSNipun Gupta 
1531005d943eSNipun Gupta 	/* The port id value does not matter as the mbuf is freed right away */
1532005d943eSNipun Gupta 	m = eth_fd_to_mbuf(fd, 0);
153316c4a3c4SNipun Gupta 	rte_pktmbuf_free(m);
153416c4a3c4SNipun Gupta }
153516c4a3c4SNipun Gupta 
153616c4a3c4SNipun Gupta static void
153716c4a3c4SNipun Gupta dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
153816c4a3c4SNipun Gupta 			     struct rte_mbuf *m,
153916c4a3c4SNipun Gupta 			     struct qbman_eq_desc *eqdesc)
154016c4a3c4SNipun Gupta {
154116c4a3c4SNipun Gupta 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
154216c4a3c4SNipun Gupta 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
154316c4a3c4SNipun Gupta 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
154416c4a3c4SNipun Gupta 	struct eqresp_metadata *eqresp_meta;
154516c4a3c4SNipun Gupta 	uint16_t orpid, seqnum;
154616c4a3c4SNipun Gupta 	uint8_t dq_idx;
154716c4a3c4SNipun Gupta 
1548e26bf82eSSachin Saxena 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
154916c4a3c4SNipun Gupta 
1550ea278063SDavid Marchand 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1551ea278063SDavid Marchand 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
155216c4a3c4SNipun Gupta 			DPAA2_EQCR_OPRID_SHIFT;
1553ea278063SDavid Marchand 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
155416c4a3c4SNipun Gupta 			DPAA2_EQCR_SEQNUM_SHIFT;
155516c4a3c4SNipun Gupta 
155616c4a3c4SNipun Gupta 		if (!priv->en_loose_ordered) {
155716c4a3c4SNipun Gupta 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
155816c4a3c4SNipun Gupta 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
155916c4a3c4SNipun Gupta 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
156016c4a3c4SNipun Gupta 				dpio_dev->eqresp_pi]), 1);
156116c4a3c4SNipun Gupta 			qbman_eq_desc_set_token(eqdesc, 1);
156216c4a3c4SNipun Gupta 
156316c4a3c4SNipun Gupta 			eqresp_meta = &dpio_dev->eqresp_meta[
156416c4a3c4SNipun Gupta 				dpio_dev->eqresp_pi];
156516c4a3c4SNipun Gupta 			eqresp_meta->dpaa2_q = dpaa2_q;
156616c4a3c4SNipun Gupta 			eqresp_meta->mp = m->pool;
156716c4a3c4SNipun Gupta 
156816c4a3c4SNipun Gupta 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
156916c4a3c4SNipun Gupta 				dpio_dev->eqresp_pi++ :
157016c4a3c4SNipun Gupta 				(dpio_dev->eqresp_pi = 0);
157116c4a3c4SNipun Gupta 		} else {
157216c4a3c4SNipun Gupta 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
157316c4a3c4SNipun Gupta 		}
157416c4a3c4SNipun Gupta 	} else {
1575ea278063SDavid Marchand 		dq_idx = *dpaa2_seqn(m) - 1;
157616c4a3c4SNipun Gupta 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
157716c4a3c4SNipun Gupta 		DPAA2_PER_LCORE_DQRR_SIZE--;
157816c4a3c4SNipun Gupta 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
157916c4a3c4SNipun Gupta 	}
1580ea278063SDavid Marchand 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
158116c4a3c4SNipun Gupta }
158216c4a3c4SNipun Gupta 
1583ed1cdbedSJun Yang uint16_t
1584ed1cdbedSJun Yang dpaa2_dev_tx_multi_txq_ordered(void **queue,
1585ed1cdbedSJun Yang 		struct rte_mbuf **bufs, uint16_t nb_pkts)
1586ed1cdbedSJun Yang {
1587ed1cdbedSJun Yang 	/* Function to transmit frames to multiple queues, one queue per frame. */
1588b0074a7bSGagandeep Singh 	uint32_t loop, i, retry_count;
1589ed1cdbedSJun Yang 	int32_t ret;
1590ed1cdbedSJun Yang 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1591fb2790a5SBrick Yang 	uint32_t frames_to_send, num_free_eq_desc = 0;
1592ed1cdbedSJun Yang 	struct rte_mempool *mp;
1593ed1cdbedSJun Yang 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1594ed1cdbedSJun Yang 	struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
1595ed1cdbedSJun Yang 	struct qbman_swp *swp;
1596ed1cdbedSJun Yang 	uint16_t bpid;
1597ed1cdbedSJun Yang 	struct rte_mbuf *mi;
1598ed1cdbedSJun Yang 	struct rte_eth_dev_data *eth_data;
1599ed1cdbedSJun Yang 	struct dpaa2_dev_priv *priv;
1600ed1cdbedSJun Yang 	struct dpaa2_queue *order_sendq;
1601b0074a7bSGagandeep Singh 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1602b0074a7bSGagandeep Singh 	uint32_t free_count = 0;
1603ed1cdbedSJun Yang 
1604ed1cdbedSJun Yang 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1605ed1cdbedSJun Yang 		ret = dpaa2_affine_qbman_swp();
1606ed1cdbedSJun Yang 		if (ret) {
1607ed1cdbedSJun Yang 			DPAA2_PMD_ERR(
1608f665790aSDavid Marchand 				"Failed to allocate IO portal, tid: %d",
1609ed1cdbedSJun Yang 				rte_gettid());
1610ed1cdbedSJun Yang 			return 0;
1611ed1cdbedSJun Yang 		}
1612ed1cdbedSJun Yang 	}
1613ed1cdbedSJun Yang 	swp = DPAA2_PER_LCORE_PORTAL;
1614ed1cdbedSJun Yang 
1615fb2790a5SBrick Yang 	frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1616fb2790a5SBrick Yang 		dpaa2_eqcr_size : nb_pkts;
1617fb2790a5SBrick Yang 
1618fb2790a5SBrick Yang 	for (loop = 0; loop < frames_to_send; loop++) {
1619ed1cdbedSJun Yang 		dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
1620ed1cdbedSJun Yang 		eth_data = dpaa2_q[loop]->eth_data;
1621ed1cdbedSJun Yang 		priv = eth_data->dev_private;
1622fb2790a5SBrick Yang 		if (!priv->en_loose_ordered) {
1623fb2790a5SBrick Yang 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1624fb2790a5SBrick Yang 				if (!num_free_eq_desc) {
1625fb2790a5SBrick Yang 					num_free_eq_desc = dpaa2_free_eq_descriptors();
1626fb2790a5SBrick Yang 					if (!num_free_eq_desc)
1627fb2790a5SBrick Yang 						goto send_frames;
1628fb2790a5SBrick Yang 				}
1629fb2790a5SBrick Yang 				num_free_eq_desc--;
1630fb2790a5SBrick Yang 			}
1631fb2790a5SBrick Yang 		}
1632fb2790a5SBrick Yang 
16332b843cacSDavid Marchand 		DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
1634fb2790a5SBrick Yang 				   eth_data, dpaa2_q[loop]->fqid);
1635fb2790a5SBrick Yang 
1636fb2790a5SBrick Yang 		/* Check if the queue is congested */
1637fb2790a5SBrick Yang 		retry_count = 0;
1638fb2790a5SBrick Yang 		while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
1639fb2790a5SBrick Yang 			retry_count++;
1640fb2790a5SBrick Yang 			/* Retry for some time before giving up */
1641fb2790a5SBrick Yang 			if (retry_count > CONG_RETRY_COUNT)
1642fb2790a5SBrick Yang 				goto send_frames;
1643fb2790a5SBrick Yang 		}
1644fb2790a5SBrick Yang 
1645fb2790a5SBrick Yang 		/* Prepare enqueue descriptor */
1646ed1cdbedSJun Yang 		qbman_eq_desc_clear(&eqdesc[loop]);
1647fb2790a5SBrick Yang 
1648ed1cdbedSJun Yang 		if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
1649ed1cdbedSJun Yang 			order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1650ed1cdbedSJun Yang 			dpaa2_set_enqueue_descriptor(order_sendq,
1651ed1cdbedSJun Yang 						     (*bufs),
1652ed1cdbedSJun Yang 						     &eqdesc[loop]);
1653ed1cdbedSJun Yang 		} else {
1654ed1cdbedSJun Yang 			qbman_eq_desc_set_no_orp(&eqdesc[loop],
1655ed1cdbedSJun Yang 							 DPAA2_EQ_RESP_ERR_FQ);
1656ed1cdbedSJun Yang 			qbman_eq_desc_set_fq(&eqdesc[loop],
1657ed1cdbedSJun Yang 						     dpaa2_q[loop]->fqid);
1658ed1cdbedSJun Yang 		}
1659ed1cdbedSJun Yang 
1660ed1cdbedSJun Yang 		if (likely(RTE_MBUF_DIRECT(*bufs))) {
1661ed1cdbedSJun Yang 			mp = (*bufs)->pool;
1662ed1cdbedSJun Yang 			/* Check the basic scenario and set
1663ed1cdbedSJun Yang 			 * the FD appropriately here itself.
1664ed1cdbedSJun Yang 			 */
1665ed1cdbedSJun Yang 			if (likely(mp && mp->ops_index ==
1666ed1cdbedSJun Yang 				priv->bp_list->dpaa2_ops_index &&
1667ed1cdbedSJun Yang 				(*bufs)->nb_segs == 1 &&
1668ed1cdbedSJun Yang 				rte_mbuf_refcnt_read((*bufs)) == 1)) {
1669ed1cdbedSJun Yang 				if (unlikely((*bufs)->ol_flags
1670ed1cdbedSJun Yang 					& RTE_MBUF_F_TX_VLAN)) {
1671ed1cdbedSJun Yang 					ret = rte_vlan_insert(bufs);
1672ed1cdbedSJun Yang 					if (ret)
1673ed1cdbedSJun Yang 						goto send_frames;
1674ed1cdbedSJun Yang 				}
1675ed1cdbedSJun Yang 				DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1676ed1cdbedSJun Yang 					&fd_arr[loop],
1677ed1cdbedSJun Yang 					mempool_to_bpid(mp));
1678ed1cdbedSJun Yang 				bufs++;
1679ed1cdbedSJun Yang 				continue;
1680ed1cdbedSJun Yang 			}
1681ed1cdbedSJun Yang 		} else {
1682ed1cdbedSJun Yang 			mi = rte_mbuf_from_indirect(*bufs);
1683ed1cdbedSJun Yang 			mp = mi->pool;
1684ed1cdbedSJun Yang 		}
1685ed1cdbedSJun Yang 		/* Not a hw_pkt pool allocated frame */
1686ed1cdbedSJun Yang 		if (unlikely(!mp || !priv->bp_list)) {
1687ed1cdbedSJun Yang 			DPAA2_PMD_ERR("Err: No buffer pool attached");
1688ed1cdbedSJun Yang 			goto send_frames;
1689ed1cdbedSJun Yang 		}
1690ed1cdbedSJun Yang 
1691ed1cdbedSJun Yang 		if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1692ed1cdbedSJun Yang 			DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1693ed1cdbedSJun Yang 			/* alloc should be from the default buffer pool
1694ed1cdbedSJun Yang 			 * attached to this interface
1695ed1cdbedSJun Yang 			 */
1696ed1cdbedSJun Yang 			bpid = priv->bp_list->buf_pool.bpid;
1697ed1cdbedSJun Yang 
1698ed1cdbedSJun Yang 			if (unlikely((*bufs)->nb_segs > 1)) {
1699ed1cdbedSJun Yang 				DPAA2_PMD_ERR(
1700ed1cdbedSJun Yang 					"S/G not supp for non hw offload buffer");
1701ed1cdbedSJun Yang 				goto send_frames;
1702ed1cdbedSJun Yang 			}
1703ed1cdbedSJun Yang 			if (eth_copy_mbuf_to_fd(*bufs,
1704ed1cdbedSJun Yang 						&fd_arr[loop], bpid)) {
1705ed1cdbedSJun Yang 				goto send_frames;
1706ed1cdbedSJun Yang 			}
1707ed1cdbedSJun Yang 			/* free the original packet */
1708ed1cdbedSJun Yang 			rte_pktmbuf_free(*bufs);
1709ed1cdbedSJun Yang 		} else {
1710ed1cdbedSJun Yang 			bpid = mempool_to_bpid(mp);
1711ed1cdbedSJun Yang 			if (unlikely((*bufs)->nb_segs > 1)) {
1712ed1cdbedSJun Yang 				if (eth_mbuf_to_sg_fd(*bufs,
1713ed1cdbedSJun Yang 						      &fd_arr[loop],
1714b0074a7bSGagandeep Singh 						      buf_to_free,
1715b0074a7bSGagandeep Singh 						      &free_count,
1716b0074a7bSGagandeep Singh 						      loop,
1717ed1cdbedSJun Yang 						      bpid))
1718ed1cdbedSJun Yang 					goto send_frames;
1719ed1cdbedSJun Yang 			} else {
1720ed1cdbedSJun Yang 				eth_mbuf_to_fd(*bufs,
1721b0074a7bSGagandeep Singh 						&fd_arr[loop],
1722b0074a7bSGagandeep Singh 						buf_to_free,
1723b0074a7bSGagandeep Singh 						&free_count,
1724b0074a7bSGagandeep Singh 						loop, bpid);
1725ed1cdbedSJun Yang 			}
1726ed1cdbedSJun Yang 		}
1727ed1cdbedSJun Yang 
1728ed1cdbedSJun Yang 		bufs++;
1729ed1cdbedSJun Yang 	}
1730ed1cdbedSJun Yang 
1731ed1cdbedSJun Yang send_frames:
1732ed1cdbedSJun Yang 	frames_to_send = loop;
1733ed1cdbedSJun Yang 	loop = 0;
1734fb2790a5SBrick Yang 	retry_count = 0;
1735ed1cdbedSJun Yang 	while (loop < frames_to_send) {
1736ed1cdbedSJun Yang 		ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
1737ed1cdbedSJun Yang 				&fd_arr[loop],
1738ed1cdbedSJun Yang 				frames_to_send - loop);
1739ed1cdbedSJun Yang 		if (likely(ret > 0)) {
1740ed1cdbedSJun Yang 			loop += ret;
1741fb2790a5SBrick Yang 			retry_count = 0;
1742ed1cdbedSJun Yang 		} else {
1743ed1cdbedSJun Yang 			retry_count++;
1744ed1cdbedSJun Yang 			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1745ed1cdbedSJun Yang 				break;
1746ed1cdbedSJun Yang 		}
1747ed1cdbedSJun Yang 	}
1748ed1cdbedSJun Yang 
1749b0074a7bSGagandeep Singh 	for (i = 0; i < free_count; i++) {
1750b0074a7bSGagandeep Singh 		if (buf_to_free[i].pkt_id < loop)
1751b0074a7bSGagandeep Singh 			rte_pktmbuf_free_seg(buf_to_free[i].seg);
1752b0074a7bSGagandeep Singh 	}
1753ed1cdbedSJun Yang 	return loop;
1754ed1cdbedSJun Yang }
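
/*
 * Illustrative sketch (editorial addition): unlike the single-queue TX
 * routines, dpaa2_dev_tx_multi_txq_ordered() takes one destination queue
 * per mbuf, so the caller supplies parallel arrays. txq0, txq1, m0 and m1
 * are assumed handles for the example.
 *
 *	void *queues[2] = { txq0, txq1 };
 *	struct rte_mbuf *pkts[2] = { m0, m1 };
 *	uint16_t sent = dpaa2_dev_tx_multi_txq_ordered(queues, pkts, 2);
 */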
1755ed1cdbedSJun Yang 
175616c4a3c4SNipun Gupta /* Callback to handle sending ordered packets through WRIOP based interface */
175716c4a3c4SNipun Gupta uint16_t
175816c4a3c4SNipun Gupta dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
175916c4a3c4SNipun Gupta {
176016c4a3c4SNipun Gupta 	/* Function to transmit the frames to the given device and VQ */
176116c4a3c4SNipun Gupta 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
176216c4a3c4SNipun Gupta 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
176316c4a3c4SNipun Gupta 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
176416c4a3c4SNipun Gupta 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
176516c4a3c4SNipun Gupta 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
176616c4a3c4SNipun Gupta 	struct rte_mbuf *mi;
176716c4a3c4SNipun Gupta 	struct rte_mempool *mp;
176816c4a3c4SNipun Gupta 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
176916c4a3c4SNipun Gupta 	struct qbman_swp *swp;
177016c4a3c4SNipun Gupta 	uint32_t frames_to_send, num_free_eq_desc;
177116c4a3c4SNipun Gupta 	uint32_t loop, retry_count;
177216c4a3c4SNipun Gupta 	int32_t ret;
177316c4a3c4SNipun Gupta 	uint16_t num_tx = 0;
177416c4a3c4SNipun Gupta 	uint16_t bpid;
1775b0074a7bSGagandeep Singh 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1776b0074a7bSGagandeep Singh 	uint32_t free_count = 0;
177716c4a3c4SNipun Gupta 
177816c4a3c4SNipun Gupta 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
177916c4a3c4SNipun Gupta 		ret = dpaa2_affine_qbman_swp();
178016c4a3c4SNipun Gupta 		if (ret) {
1781d527f5d9SNipun Gupta 			DPAA2_PMD_ERR(
1782f665790aSDavid Marchand 				"Failed to allocate IO portal, tid: %d",
1783d527f5d9SNipun Gupta 				rte_gettid());
178416c4a3c4SNipun Gupta 			return 0;
178516c4a3c4SNipun Gupta 		}
178616c4a3c4SNipun Gupta 	}
178716c4a3c4SNipun Gupta 	swp = DPAA2_PER_LCORE_PORTAL;
178816c4a3c4SNipun Gupta 
17892b843cacSDavid Marchand 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
179016c4a3c4SNipun Gupta 			   eth_data, dpaa2_q->fqid);
179116c4a3c4SNipun Gupta 
179216c4a3c4SNipun Gupta 	/* This would also handle normal and atomic queues as any type
179316c4a3c4SNipun Gupta 	 * of packet can be enqueued when ordered queues are being used.
179416c4a3c4SNipun Gupta 	 */
179516c4a3c4SNipun Gupta 	while (nb_pkts) {
179616c4a3c4SNipun Gupta 		/*Check if the queue is congested*/
179716c4a3c4SNipun Gupta 		retry_count = 0;
179816c4a3c4SNipun Gupta 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
179916c4a3c4SNipun Gupta 			retry_count++;
180016c4a3c4SNipun Gupta 			/* Retry for some time before giving up */
180116c4a3c4SNipun Gupta 			if (retry_count > CONG_RETRY_COUNT)
180216c4a3c4SNipun Gupta 				goto skip_tx;
180316c4a3c4SNipun Gupta 		}
180416c4a3c4SNipun Gupta 
180516c4a3c4SNipun Gupta 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
180616c4a3c4SNipun Gupta 			dpaa2_eqcr_size : nb_pkts;
180716c4a3c4SNipun Gupta 
180816c4a3c4SNipun Gupta 		if (!priv->en_loose_ordered) {
1809ea278063SDavid Marchand 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
181016c4a3c4SNipun Gupta 				num_free_eq_desc = dpaa2_free_eq_descriptors();
181116c4a3c4SNipun Gupta 				if (num_free_eq_desc < frames_to_send)
181216c4a3c4SNipun Gupta 					frames_to_send = num_free_eq_desc;
181316c4a3c4SNipun Gupta 			}
181416c4a3c4SNipun Gupta 		}
181516c4a3c4SNipun Gupta 
181616c4a3c4SNipun Gupta 		for (loop = 0; loop < frames_to_send; loop++) {
181716c4a3c4SNipun Gupta 			/*Prepare enqueue descriptor*/
181816c4a3c4SNipun Gupta 			qbman_eq_desc_clear(&eqdesc[loop]);
181916c4a3c4SNipun Gupta 
1820ea278063SDavid Marchand 			if (*dpaa2_seqn(*bufs)) {
182116c4a3c4SNipun Gupta 				/* Use only queue 0 for Tx in case of atomic/
182216c4a3c4SNipun Gupta 				 * ordered packets, as packets can get reordered
18237be78d02SJosh Soref 				 * when being transmitted out from the interface
182416c4a3c4SNipun Gupta 				 */
182516c4a3c4SNipun Gupta 				dpaa2_set_enqueue_descriptor(order_sendq,
182616c4a3c4SNipun Gupta 							     (*bufs),
182716c4a3c4SNipun Gupta 							     &eqdesc[loop]);
182816c4a3c4SNipun Gupta 			} else {
182916c4a3c4SNipun Gupta 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
183016c4a3c4SNipun Gupta 							 DPAA2_EQ_RESP_ERR_FQ);
1831e26bf82eSSachin Saxena 				qbman_eq_desc_set_fq(&eqdesc[loop],
1832e26bf82eSSachin Saxena 						     dpaa2_q->fqid);
183316c4a3c4SNipun Gupta 			}
183416c4a3c4SNipun Gupta 
183516c4a3c4SNipun Gupta 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
183616c4a3c4SNipun Gupta 				mp = (*bufs)->pool;
183716c4a3c4SNipun Gupta 				/* Check the basic scenario and set
183816c4a3c4SNipun Gupta 				 * the FD appropriately here itself.
183916c4a3c4SNipun Gupta 				 */
184016c4a3c4SNipun Gupta 				if (likely(mp && mp->ops_index ==
184116c4a3c4SNipun Gupta 				    priv->bp_list->dpaa2_ops_index &&
184216c4a3c4SNipun Gupta 				    (*bufs)->nb_segs == 1 &&
184316c4a3c4SNipun Gupta 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
184416c4a3c4SNipun Gupta 					if (unlikely((*bufs)->ol_flags
1845daa02b5cSOlivier Matz 						& RTE_MBUF_F_TX_VLAN)) {
184616c4a3c4SNipun Gupta 						ret = rte_vlan_insert(bufs);
184716c4a3c4SNipun Gupta 						if (ret)
184816c4a3c4SNipun Gupta 							goto send_n_return;
184916c4a3c4SNipun Gupta 					}
185016c4a3c4SNipun Gupta 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
185116c4a3c4SNipun Gupta 						&fd_arr[loop],
185216c4a3c4SNipun Gupta 						mempool_to_bpid(mp));
185316c4a3c4SNipun Gupta 					bufs++;
185416c4a3c4SNipun Gupta 					continue;
185516c4a3c4SNipun Gupta 				}
185616c4a3c4SNipun Gupta 			} else {
185716c4a3c4SNipun Gupta 				mi = rte_mbuf_from_indirect(*bufs);
185816c4a3c4SNipun Gupta 				mp = mi->pool;
185916c4a3c4SNipun Gupta 			}
186016c4a3c4SNipun Gupta 			/* Not a hw_pkt pool allocated frame */
186116c4a3c4SNipun Gupta 			if (unlikely(!mp || !priv->bp_list)) {
186216c4a3c4SNipun Gupta 				DPAA2_PMD_ERR("Err: No buffer pool attached");
186316c4a3c4SNipun Gupta 				goto send_n_return;
186416c4a3c4SNipun Gupta 			}
186516c4a3c4SNipun Gupta 
186616c4a3c4SNipun Gupta 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
186716c4a3c4SNipun Gupta 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
186816c4a3c4SNipun Gupta 				/* alloc should be from the default buffer pool
186916c4a3c4SNipun Gupta 				 * attached to this interface
187016c4a3c4SNipun Gupta 				 */
187116c4a3c4SNipun Gupta 				bpid = priv->bp_list->buf_pool.bpid;
187216c4a3c4SNipun Gupta 
187316c4a3c4SNipun Gupta 				if (unlikely((*bufs)->nb_segs > 1)) {
187416c4a3c4SNipun Gupta 					DPAA2_PMD_ERR(
187516c4a3c4SNipun Gupta 						"S/G not supp for non hw offload buffer");
187616c4a3c4SNipun Gupta 					goto send_n_return;
187716c4a3c4SNipun Gupta 				}
187816c4a3c4SNipun Gupta 				if (eth_copy_mbuf_to_fd(*bufs,
187916c4a3c4SNipun Gupta 							&fd_arr[loop], bpid)) {
188016c4a3c4SNipun Gupta 					goto send_n_return;
188116c4a3c4SNipun Gupta 				}
188216c4a3c4SNipun Gupta 				/* free the original packet */
188316c4a3c4SNipun Gupta 				rte_pktmbuf_free(*bufs);
188416c4a3c4SNipun Gupta 			} else {
188516c4a3c4SNipun Gupta 				bpid = mempool_to_bpid(mp);
188616c4a3c4SNipun Gupta 				if (unlikely((*bufs)->nb_segs > 1)) {
188716c4a3c4SNipun Gupta 					if (eth_mbuf_to_sg_fd(*bufs,
188816c4a3c4SNipun Gupta 							      &fd_arr[loop],
1889b0074a7bSGagandeep Singh 							      buf_to_free,
1890b0074a7bSGagandeep Singh 							      &free_count,
1891b0074a7bSGagandeep Singh 							      loop,
189216c4a3c4SNipun Gupta 							      bpid))
189316c4a3c4SNipun Gupta 						goto send_n_return;
189416c4a3c4SNipun Gupta 				} else {
189516c4a3c4SNipun Gupta 					eth_mbuf_to_fd(*bufs,
1896b0074a7bSGagandeep Singh 							&fd_arr[loop],
1897b0074a7bSGagandeep Singh 							buf_to_free,
1898b0074a7bSGagandeep Singh 							&free_count,
1899b0074a7bSGagandeep Singh 							loop, bpid);
190016c4a3c4SNipun Gupta 				}
190116c4a3c4SNipun Gupta 			}
190216c4a3c4SNipun Gupta 			bufs++;
190316c4a3c4SNipun Gupta 		}
1904ce4fd609SNipun Gupta 
190516c4a3c4SNipun Gupta 		loop = 0;
1906ce4fd609SNipun Gupta 		retry_count = 0;
190716c4a3c4SNipun Gupta 		while (loop < frames_to_send) {
1908ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple_desc(swp,
190916c4a3c4SNipun Gupta 					&eqdesc[loop], &fd_arr[loop],
191016c4a3c4SNipun Gupta 					frames_to_send - loop);
1911ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1912ce4fd609SNipun Gupta 				retry_count++;
1913ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1914ce4fd609SNipun Gupta 					num_tx += loop;
1915ce4fd609SNipun Gupta 					nb_pkts -= loop;
1916ce4fd609SNipun Gupta 					goto send_n_return;
1917ce4fd609SNipun Gupta 				}
1918ce4fd609SNipun Gupta 			} else {
1919ce4fd609SNipun Gupta 				loop += ret;
1920ce4fd609SNipun Gupta 				retry_count = 0;
1921ce4fd609SNipun Gupta 			}
192216c4a3c4SNipun Gupta 		}
192316c4a3c4SNipun Gupta 
1924ce4fd609SNipun Gupta 		num_tx += loop;
1925ce4fd609SNipun Gupta 		nb_pkts -= loop;
192616c4a3c4SNipun Gupta 	}
192716c4a3c4SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
1928b0074a7bSGagandeep Singh 	for (loop = 0; loop < free_count; loop++) {
1929b0074a7bSGagandeep Singh 		if (buf_to_free[loop].pkt_id < num_tx)
1930b0074a7bSGagandeep Singh 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1931b0074a7bSGagandeep Singh 	}
1932b0074a7bSGagandeep Singh 
193316c4a3c4SNipun Gupta 	return num_tx;
193416c4a3c4SNipun Gupta 
193516c4a3c4SNipun Gupta send_n_return:
193616c4a3c4SNipun Gupta 	/* send any already prepared fd */
193716c4a3c4SNipun Gupta 	if (loop) {
193816c4a3c4SNipun Gupta 		unsigned int i = 0;
193916c4a3c4SNipun Gupta 
1940ce4fd609SNipun Gupta 		retry_count = 0;
194116c4a3c4SNipun Gupta 		while (i < loop) {
1942ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple_desc(swp,
1943fb2790a5SBrick Yang 				       &eqdesc[i], &fd_arr[i], loop - i);
1944ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1945ce4fd609SNipun Gupta 				retry_count++;
1946ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1947ce4fd609SNipun Gupta 					break;
1948ce4fd609SNipun Gupta 			} else {
1949ce4fd609SNipun Gupta 				i += ret;
1950ce4fd609SNipun Gupta 				retry_count = 0;
195116c4a3c4SNipun Gupta 			}
1952ce4fd609SNipun Gupta 		}
1953ce4fd609SNipun Gupta 		num_tx += i;
195416c4a3c4SNipun Gupta 	}
195516c4a3c4SNipun Gupta skip_tx:
195616c4a3c4SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
1957b0074a7bSGagandeep Singh 	for (loop = 0; loop < free_count; loop++) {
1958b0074a7bSGagandeep Singh 		if (buf_to_free[loop].pkt_id < num_tx)
1959b0074a7bSGagandeep Singh 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1960b0074a7bSGagandeep Singh 	}
1961b0074a7bSGagandeep Singh 
196216c4a3c4SNipun Gupta 	return num_tx;
196316c4a3c4SNipun Gupta }
196416c4a3c4SNipun Gupta 
1965a3a997f0SHemant Agrawal /* This function loops back all the received packets. */
1966a3a997f0SHemant Agrawal uint16_t
1967a3a997f0SHemant Agrawal dpaa2_dev_loopback_rx(void *queue,
1968a3a997f0SHemant Agrawal 		      struct rte_mbuf **bufs __rte_unused,
1969a3a997f0SHemant Agrawal 		      uint16_t nb_pkts)
1970a3a997f0SHemant Agrawal {
1971a3a997f0SHemant Agrawal 	/* Function to receive frames for a given device and VQ */
1972a3a997f0SHemant Agrawal 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1973a3a997f0SHemant Agrawal 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1974a3a997f0SHemant Agrawal 	uint32_t fqid = dpaa2_q->fqid;
1975a3a997f0SHemant Agrawal 	int ret, num_rx = 0, num_tx = 0, pull_size;
1976a3a997f0SHemant Agrawal 	uint8_t pending, status;
1977a3a997f0SHemant Agrawal 	struct qbman_swp *swp;
1978a3a997f0SHemant Agrawal 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1979a3a997f0SHemant Agrawal 	struct qbman_pull_desc pulldesc;
1980a3a997f0SHemant Agrawal 	struct qbman_eq_desc eqdesc;
198112d98eceSJun Yang 	struct queue_storage_info_t *q_storage;
1982a3a997f0SHemant Agrawal 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1983a3a997f0SHemant Agrawal 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1984a3a997f0SHemant Agrawal 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1985a3a997f0SHemant Agrawal 	/* todo - currently only the 1st TX queue is used for loopback */
1986a3a997f0SHemant Agrawal 
198712d98eceSJun Yang 	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
1988a3a997f0SHemant Agrawal 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1989a3a997f0SHemant Agrawal 		ret = dpaa2_affine_qbman_ethrx_swp();
1990a3a997f0SHemant Agrawal 		if (ret) {
1991a3a997f0SHemant Agrawal 			DPAA2_PMD_ERR("Failure in affining portal");
1992a3a997f0SHemant Agrawal 			return 0;
1993a3a997f0SHemant Agrawal 		}
1994a3a997f0SHemant Agrawal 	}
1995a3a997f0SHemant Agrawal 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1996a3a997f0SHemant Agrawal 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1997a3a997f0SHemant Agrawal 	if (unlikely(!q_storage->active_dqs)) {
1998a3a997f0SHemant Agrawal 		q_storage->toggle = 0;
1999a3a997f0SHemant Agrawal 		dq_storage = q_storage->dq_storage[q_storage->toggle];
2000a3a997f0SHemant Agrawal 		q_storage->last_num_pkts = pull_size;
2001a3a997f0SHemant Agrawal 		qbman_pull_desc_clear(&pulldesc);
2002a3a997f0SHemant Agrawal 		qbman_pull_desc_set_numframes(&pulldesc,
2003a3a997f0SHemant Agrawal 					      q_storage->last_num_pkts);
2004a3a997f0SHemant Agrawal 		qbman_pull_desc_set_fq(&pulldesc, fqid);
2005a3a997f0SHemant Agrawal 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
2006a3a997f0SHemant Agrawal 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
2007a3a997f0SHemant Agrawal 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
2008a3a997f0SHemant Agrawal 			while (!qbman_check_command_complete(
2009a3a997f0SHemant Agrawal 			       get_swp_active_dqs(
2010a3a997f0SHemant Agrawal 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
2011a3a997f0SHemant Agrawal 				;
2012a3a997f0SHemant Agrawal 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
2013a3a997f0SHemant Agrawal 		}
2014a3a997f0SHemant Agrawal 		while (1) {
2015a3a997f0SHemant Agrawal 			if (qbman_swp_pull(swp, &pulldesc)) {
2016a3a997f0SHemant Agrawal 				DPAA2_PMD_DP_DEBUG(
20172b843cacSDavid Marchand 					"VDQ command not issued. QBMAN busy");
2018a3a997f0SHemant Agrawal 				/* Portal was busy, try again */
2019a3a997f0SHemant Agrawal 				continue;
2020a3a997f0SHemant Agrawal 			}
2021a3a997f0SHemant Agrawal 			break;
2022a3a997f0SHemant Agrawal 		}
2023a3a997f0SHemant Agrawal 		q_storage->active_dqs = dq_storage;
2024a3a997f0SHemant Agrawal 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2025a3a997f0SHemant Agrawal 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
2026a3a997f0SHemant Agrawal 				   dq_storage);
2027a3a997f0SHemant Agrawal 	}
2028a3a997f0SHemant Agrawal 
2029a3a997f0SHemant Agrawal 	dq_storage = q_storage->active_dqs;
2030a3a997f0SHemant Agrawal 	rte_prefetch0((void *)(size_t)(dq_storage));
2031a3a997f0SHemant Agrawal 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
2032a3a997f0SHemant Agrawal 
2033a3a997f0SHemant Agrawal 	/* Prepare next pull descriptor. This will give space for the
20347be78d02SJosh Soref 	 * prefetching done on DQRR entries
2035a3a997f0SHemant Agrawal 	 */
2036a3a997f0SHemant Agrawal 	q_storage->toggle ^= 1;
2037a3a997f0SHemant Agrawal 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
2038a3a997f0SHemant Agrawal 	qbman_pull_desc_clear(&pulldesc);
2039a3a997f0SHemant Agrawal 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
2040a3a997f0SHemant Agrawal 	qbman_pull_desc_set_fq(&pulldesc, fqid);
2041a3a997f0SHemant Agrawal 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
2042a3a997f0SHemant Agrawal 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
2043a3a997f0SHemant Agrawal 
2044a3a997f0SHemant Agrawal 	/*Prepare enqueue descriptor*/
2045a3a997f0SHemant Agrawal 	qbman_eq_desc_clear(&eqdesc);
2046a3a997f0SHemant Agrawal 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
2047a3a997f0SHemant Agrawal 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
2048a3a997f0SHemant Agrawal 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
2049a3a997f0SHemant Agrawal 
2050a3a997f0SHemant Agrawal 	/* Check if the previously issued command is completed.
2051a3a997f0SHemant Agrawal 	 * Also seems like the SWP is shared between the Ethernet Driver
2052a3a997f0SHemant Agrawal 	 * and the SEC driver.
2053a3a997f0SHemant Agrawal 	 */
2054a3a997f0SHemant Agrawal 	while (!qbman_check_command_complete(dq_storage))
2055a3a997f0SHemant Agrawal 		;
2056a3a997f0SHemant Agrawal 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
2057a3a997f0SHemant Agrawal 		clear_swp_active_dqs(q_storage->active_dpio_id);
2058a3a997f0SHemant Agrawal 
2059a3a997f0SHemant Agrawal 	pending = 1;
2060a3a997f0SHemant Agrawal 
2061a3a997f0SHemant Agrawal 	do {
2062a3a997f0SHemant Agrawal 		/* Loop until the dq_storage is updated with
2063a3a997f0SHemant Agrawal 		 * new token by QBMAN
2064a3a997f0SHemant Agrawal 		 */
2065a3a997f0SHemant Agrawal 		while (!qbman_check_new_result(dq_storage))
2066a3a997f0SHemant Agrawal 			;
2067a3a997f0SHemant Agrawal 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
2068a3a997f0SHemant Agrawal 		/* Check whether the last pull command has expired
2069a3a997f0SHemant Agrawal 		 * and set the condition for loop termination
2070a3a997f0SHemant Agrawal 		 */
2071a3a997f0SHemant Agrawal 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
2072a3a997f0SHemant Agrawal 			pending = 0;
2073a3a997f0SHemant Agrawal 			/* Check for valid frame. */
2074a3a997f0SHemant Agrawal 			status = qbman_result_DQ_flags(dq_storage);
2075a3a997f0SHemant Agrawal 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
2076a3a997f0SHemant Agrawal 				continue;
2077a3a997f0SHemant Agrawal 		}
2078*43fd3624SAndre Muezerie 		fd[num_rx] = RTE_PTR_UNQUAL(qbman_result_DQ_fd(dq_storage));
2079a3a997f0SHemant Agrawal 
2080a3a997f0SHemant Agrawal 		dq_storage++;
2081a3a997f0SHemant Agrawal 		num_rx++;
2082a3a997f0SHemant Agrawal 	} while (pending);
2083a3a997f0SHemant Agrawal 
2084a3a997f0SHemant Agrawal 	while (num_tx < num_rx) {
2085a3a997f0SHemant Agrawal 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
2086a3a997f0SHemant Agrawal 				&fd[num_tx], 0, num_rx - num_tx);
2087a3a997f0SHemant Agrawal 	}
2088a3a997f0SHemant Agrawal 
2089a3a997f0SHemant Agrawal 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
2090a3a997f0SHemant Agrawal 		while (!qbman_check_command_complete(
2091a3a997f0SHemant Agrawal 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
2092a3a997f0SHemant Agrawal 			;
2093a3a997f0SHemant Agrawal 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
2094a3a997f0SHemant Agrawal 	}
2095a3a997f0SHemant Agrawal 	/* issue a volatile dequeue command for next pull */
2096a3a997f0SHemant Agrawal 	while (1) {
2097a3a997f0SHemant Agrawal 		if (qbman_swp_pull(swp, &pulldesc)) {
2098a3a997f0SHemant Agrawal 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
20992b843cacSDavid Marchand 					  " QBMAN is busy (2)");
2100a3a997f0SHemant Agrawal 			continue;
2101a3a997f0SHemant Agrawal 		}
2102a3a997f0SHemant Agrawal 		break;
2103a3a997f0SHemant Agrawal 	}
2104a3a997f0SHemant Agrawal 	q_storage->active_dqs = dq_storage1;
2105a3a997f0SHemant Agrawal 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2106a3a997f0SHemant Agrawal 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
2107a3a997f0SHemant Agrawal 
2108a3a997f0SHemant Agrawal 	dpaa2_q->rx_pkts += num_rx;
2109a3a997f0SHemant Agrawal 	dpaa2_q->tx_pkts += num_tx;
2110a3a997f0SHemant Agrawal 
2111a3a997f0SHemant Agrawal 	return 0;
2112a3a997f0SHemant Agrawal }
2113