xref: /dpdk/drivers/net/dpaa/dpaa_rxtx.c (revision 7594cafa92189fd5bad87a5caa6b7a92bbab0979)
1d81734caSHemant Agrawal /* SPDX-License-Identifier: BSD-3-Clause
237f9b54bSShreyansh Jain  *
337f9b54bSShreyansh Jain  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4615352f5SVanshika Shukla  *   Copyright 2017,2019-2024 NXP
537f9b54bSShreyansh Jain  *
637f9b54bSShreyansh Jain  */
737f9b54bSShreyansh Jain 
837f9b54bSShreyansh Jain /* System headers */
937f9b54bSShreyansh Jain #include <inttypes.h>
1037f9b54bSShreyansh Jain #include <unistd.h>
1137f9b54bSShreyansh Jain #include <stdio.h>
1237f9b54bSShreyansh Jain #include <limits.h>
1337f9b54bSShreyansh Jain #include <sched.h>
1437f9b54bSShreyansh Jain #include <pthread.h>
1537f9b54bSShreyansh Jain 
1637f9b54bSShreyansh Jain #include <rte_byteorder.h>
1737f9b54bSShreyansh Jain #include <rte_common.h>
1837f9b54bSShreyansh Jain #include <rte_interrupts.h>
1937f9b54bSShreyansh Jain #include <rte_log.h>
2037f9b54bSShreyansh Jain #include <rte_debug.h>
2137f9b54bSShreyansh Jain #include <rte_pci.h>
2237f9b54bSShreyansh Jain #include <rte_atomic.h>
2337f9b54bSShreyansh Jain #include <rte_branch_prediction.h>
2437f9b54bSShreyansh Jain #include <rte_memory.h>
2537f9b54bSShreyansh Jain #include <rte_tailq.h>
2637f9b54bSShreyansh Jain #include <rte_eal.h>
2737f9b54bSShreyansh Jain #include <rte_alarm.h>
2837f9b54bSShreyansh Jain #include <rte_ether.h>
29df96fd0dSBruce Richardson #include <ethdev_driver.h>
3037f9b54bSShreyansh Jain #include <rte_malloc.h>
3137f9b54bSShreyansh Jain #include <rte_ring.h>
3237f9b54bSShreyansh Jain #include <rte_ip.h>
3337f9b54bSShreyansh Jain #include <rte_tcp.h>
3437f9b54bSShreyansh Jain #include <rte_udp.h>
35d565c887SAshish Jain #include <rte_net.h>
365e745593SSunil Kumar Kori #include <rte_eventdev.h>
3737f9b54bSShreyansh Jain 
3837f9b54bSShreyansh Jain #include "dpaa_ethdev.h"
3937f9b54bSShreyansh Jain #include "dpaa_rxtx.h"
40a2f1da7dSDavid Marchand #include <bus_dpaa_driver.h>
4137f9b54bSShreyansh Jain #include <dpaa_mempool.h>
4237f9b54bSShreyansh Jain 
435e745593SSunil Kumar Kori #include <qman.h>
4437f9b54bSShreyansh Jain #include <fsl_usd.h>
4537f9b54bSShreyansh Jain #include <fsl_qman.h>
4637f9b54bSShreyansh Jain #include <fsl_bman.h>
478c83f28cSHemant Agrawal #include <dpaa_of.h>
4837f9b54bSShreyansh Jain #include <netcfg.h>
4937f9b54bSShreyansh Jain 
50480ec5b4SHemant Agrawal #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
51480ec5b4SHemant Agrawal static int s_force_display_frm;
52480ec5b4SHemant Agrawal #endif
53480ec5b4SHemant Agrawal 
/*
 * Fill a QMan frame descriptor (_fd) describing a single contiguous
 * buffer taken from mbuf _mbuf:
 *  - format is QM_FD_CONTIG; the data offset and packet length are
 *    packed into the 32-bit "opaque" word alongside it,
 *  - addr is the buffer's bus/IO address (buf_iova),
 *  - bpid selects the buffer pool the frame is released back to.
 */
#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
	do { \
		(_fd)->opaque_addr = 0; \
		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
		(_fd)->opaque |= (_mbuf)->pkt_len; \
		(_fd)->addr = (_mbuf)->buf_iova; \
		(_fd)->bpid = _bpid; \
	} while (0)
6337f9b54bSShreyansh Jain 
6477393f56SSachin Saxena #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/*
 * Enable/disable forced frame dumping: when set, dpaa_display_frame_info()
 * dumps every frame, not just frames with a non-zero FD status.
 */
void
dpaa_force_display_frame_set(int set)
{
	s_force_display_frm = set;
}
70480ec5b4SHemant Agrawal 
7177393f56SSachin Saxena #define DISPLAY_PRINT printf
/*
 * Debug helper: dump a frame descriptor, its HW parse results (RX only)
 * and the frame payload to stdout.
 *
 * Only frames with a non-zero FD status are dumped unless forced via
 * dpaa_force_display_frame_set(). Payload dumping of S/G frames is not
 * implemented.
 *
 * @param fd    frame descriptor to decode
 * @param fqid  frame queue the FD was dequeued from / enqueued to
 * @param rx    true for RX frames (enables parse-result decoding)
 */
static void
dpaa_display_frame_info(const struct qm_fd *fd,
	uint32_t fqid, bool rx)
{
	int pos, offset = 0;
	char *ptr, info[1024];
	/* The annotation area (incl. parse results) sits at the start of
	 * the frame buffer, before the data offset.
	 */
	struct annotations_t *annot = rte_dpaa_mem_ptov(fd->addr);
	uint8_t format;
	const struct dpaa_eth_parse_results_t *psr;

	if (!fd->status && !s_force_display_frm) {
		/* Do not display correct packets unless force display.*/
		return;
	}
	psr = &annot->parse;

	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	if (format == qm_fd_contig)
		sprintf(info, "simple");
	else if (format == qm_fd_sg)
		sprintf(info, "sg");
	else
		sprintf(info, "unknown format(%d)", format);

	DISPLAY_PRINT("%s: fqid=%08x, bpid=%d, phy addr=0x%lx ",
		rx ? "RX" : "TX", fqid, fd->bpid, (unsigned long)fd->addr);
	DISPLAY_PRINT("format=%s offset=%d, len=%d, stat=0x%x\r\n",
		info, fd->offset, fd->length20, fd->status);
	if (rx) {
		/* Decode the HW parser results layer by layer; "info" is
		 * reused as a scratch string for each layer's summary.
		 */
		DISPLAY_PRINT("Display usual RX parser result:\r\n");
		if (psr->eth_frame_type == 0)
			offset += sprintf(&info[offset], "unicast");
		else if (psr->eth_frame_type == 1)
			offset += sprintf(&info[offset], "multicast");
		else if (psr->eth_frame_type == 3)
			offset += sprintf(&info[offset], "broadcast");
		else
			offset += sprintf(&info[offset], "unknown eth type(%d)",
				psr->eth_frame_type);
		if (psr->l2r_err) {
			offset += sprintf(&info[offset], " L2 error(%d)",
				psr->l2r_err);
		} else {
			offset += sprintf(&info[offset], " L2 non error");
		}
		DISPLAY_PRINT("L2: %s, %s, ethernet type:%s\r\n",
			psr->ethernet ? "is ethernet" : "non ethernet",
			psr->vlan ? "is vlan" : "non vlan", info);

		offset = 0;
		DISPLAY_PRINT("L3: %s/%s, %s/%s, %s, %s\r\n",
			psr->first_ipv4 ? "first IPv4" : "non first IPv4",
			psr->last_ipv4 ? "last IPv4" : "non last IPv4",
			psr->first_ipv6 ? "first IPv6" : "non first IPv6",
			psr->last_ipv6 ? "last IPv6" : "non last IPv6",
			psr->gre ? "GRE" : "non GRE",
			psr->l3_err ? "L3 has error" : "L3 non error");

		if (psr->l4_type == DPAA_PR_L4_TCP_TYPE) {
			offset += sprintf(&info[offset], "tcp");
		} else if (psr->l4_type == DPAA_PR_L4_UDP_TYPE) {
			offset += sprintf(&info[offset], "udp");
		} else if (psr->l4_type == DPAA_PR_L4_IPSEC_TYPE) {
			offset += sprintf(&info[offset], "IPSec ");
			if (psr->esp_sum)
				offset += sprintf(&info[offset], "ESP");
			if (psr->ah)
				offset += sprintf(&info[offset], "AH");
		} else if (psr->l4_type == DPAA_PR_L4_SCTP_TYPE) {
			offset += sprintf(&info[offset], "sctp");
		} else if (psr->l4_type == DPAA_PR_L4_DCCP_TYPE) {
			offset += sprintf(&info[offset], "dccp");
		} else {
			offset += sprintf(&info[offset], "unknown l4 type(%d)",
				psr->l4_type);
		}
		DISPLAY_PRINT("L4: type:%s, L4 validation %s\r\n",
			info, psr->l4cv ? "Performed" : "NOT performed");

		/* Finally, dump the per-header offsets the parser found. */
		offset = 0;
		if (psr->ethernet) {
			offset += sprintf(&info[offset],
				"Eth offset=%d, ethtype offset=%d, ",
				psr->eth_off, psr->etype_off);
		}
		if (psr->vlan) {
			offset += sprintf(&info[offset], "vLAN offset=%d, ",
				psr->vlan_off[0]);
		}
		if (psr->first_ipv4 || psr->first_ipv6) {
			offset += sprintf(&info[offset], "first IP offset=%d, ",
				psr->ip_off[0]);
		}
		if (psr->last_ipv4 || psr->last_ipv6) {
			offset += sprintf(&info[offset], "last IP offset=%d, ",
				psr->ip_off[1]);
		}
		if (psr->gre) {
			offset += sprintf(&info[offset], "GRE offset=%d, ",
				psr->gre_off);
		}
		if (psr->l4_type >= DPAA_PR_L4_TCP_TYPE) {
			offset += sprintf(&info[offset], "L4 offset=%d, ",
				psr->l4_off);
		}
		offset += sprintf(&info[offset], "Next HDR(0x%04x) offset=%d.",
			rte_be_to_cpu_16(psr->nxthdr), psr->nxthdr_off);

		DISPLAY_PRINT("%s\r\n", info);
	}

	if (unlikely(format == qm_fd_sg)) {
		/*TBD:S/G display: to be implemented*/
		return;
	}

	/* Hex-dump the payload, 16 bytes per line. */
	DISPLAY_PRINT("Frame payload:\r\n");
	ptr = (char *)annot;
	ptr += fd->offset;
	for (pos = 0; pos < fd->length20; pos++) {
		DISPLAY_PRINT("%02x ", ptr[pos]);
		if (((pos + 1) % 16) == 0)
			DISPLAY_PRINT("\n");
	}
	DISPLAY_PRINT("\n");
}
198480ec5b4SHemant Agrawal 
19905ba55bcSShreyansh Jain #else
20077393f56SSachin Saxena #define dpaa_display_frame_info(a, b, c)
20105ba55bcSShreyansh Jain #endif
20205ba55bcSShreyansh Jain 
203a350a954SHemant Agrawal static inline void
204a350a954SHemant Agrawal dpaa_slow_parsing(struct rte_mbuf *m,
205a350a954SHemant Agrawal 	const struct annotations_t *annot)
206a7bdc3bdSShreyansh Jain {
207a350a954SHemant Agrawal 	const struct dpaa_eth_parse_results_t *parse;
208a350a954SHemant Agrawal 
209a7bdc3bdSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Slow parsing");
210a350a954SHemant Agrawal 	parse = &annot->parse;
211a350a954SHemant Agrawal 
212a350a954SHemant Agrawal 	if (parse->ethernet)
213a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L2_ETHER;
214a350a954SHemant Agrawal 	if (parse->vlan)
215a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
216a350a954SHemant Agrawal 	if (parse->first_ipv4)
217a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV4;
218a350a954SHemant Agrawal 	if (parse->first_ipv6)
219a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV6;
220a350a954SHemant Agrawal 	if (parse->gre)
221a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_TUNNEL_GRE;
222a350a954SHemant Agrawal 	if (parse->last_ipv4)
223a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV4_EXT;
224a350a954SHemant Agrawal 	if (parse->last_ipv6)
225a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV6_EXT;
226a350a954SHemant Agrawal 	if (parse->l4_type == DPAA_PR_L4_TCP_TYPE)
227a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L4_TCP;
228a350a954SHemant Agrawal 	else if (parse->l4_type == DPAA_PR_L4_UDP_TYPE)
229a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L4_UDP;
230a350a954SHemant Agrawal 	else if (parse->l4_type == DPAA_PR_L4_IPSEC_TYPE &&
231a350a954SHemant Agrawal 		!parse->l4_info_err && parse->esp_sum)
232a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
233a350a954SHemant Agrawal 	else if (parse->l4_type == DPAA_PR_L4_SCTP_TYPE)
234a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L4_SCTP;
235a7bdc3bdSShreyansh Jain }
236a7bdc3bdSShreyansh Jain 
/*
 * Populate mbuf metadata (packet_type, ol_flags, tx_offload, RSS hash)
 * from the HW annotation area at the start of the frame buffer.
 *
 * The masked 64-bit parse word is matched against known DPAA_PKT_TYPE_*
 * combinations; anything else falls through to bit-by-bit decoding in
 * dpaa_slow_parsing(). Checksum-good flags are set up front and replaced
 * with the -BAD variants for the explicit checksum-error parse codes.
 *
 * NOTE(review): the mbuf's data_off/data_len are assumed to already be
 * set by the caller so that rte_pktmbuf_mtod() points at the Ethernet
 * header (used for the IEEE1588 ethertype check) — confirm against
 * dpaa_eth_fd_to_mbuf()/dpaa_eth_sg_to_mbuf() call sites.
 */
static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
	uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

	/* Optimistic defaults; overridden below for checksum-error codes. */
	m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD |
		RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	switch (prs) {
	case DPAA_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA_PKT_TYPE_IPV4_FRAG:
	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV6_FRAG:
	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPSEC_IPV4:
		/* Only ESP tunnels are reported; check the ESP parse bit. */
		if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK)
			m->packet_type = RTE_PTYPE_L2_ETHER |
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP;
		break;
	case DPAA_PKT_TYPE_IPSEC_IPV6:
		if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK)
			m->packet_type = RTE_PTYPE_L2_ETHER |
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV4_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_CSUM_ERR:
		/* Replace the optimistic defaults with IP-checksum-bad. */
		m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_BAD;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR:
		m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_L4_CKSUM_BAD;
		break;
	case DPAA_PKT_TYPE_NONE:
		m->packet_type = 0;
		break;
	/* More switch cases can be added */
	default:
		dpaa_slow_parsing(m, annot);
	}

	/* tx_offload packs l2_len (IP offset) and the derived l3_len. */
	m->tx_offload = annot->parse.ip_off[0];
	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
					<< DPAA_PKT_L3_LEN_SHIFT;

	/* Set the hash values */
	m->hash.rss = (uint32_t)(annot->hash);

	/* Check if Vlan is present */
	if (prs & DPAA_PARSE_VLAN_MASK)
		m->ol_flags |= RTE_MBUF_F_RX_VLAN;
	/* Packet received without stripping the vlan */

	if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_1588)) {
		m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
		m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
	}
}
368a7bdc3bdSShreyansh Jain 
3695a8cf1beSShreyansh Jain static inline void dpaa_checksum(struct rte_mbuf *mbuf)
3705a8cf1beSShreyansh Jain {
3716d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_hdr =
3726d13ea8eSOlivier Matz 		rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
3735a8cf1beSShreyansh Jain 	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
374a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
375a7c528e5SOlivier Matz 	struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
3765a8cf1beSShreyansh Jain 
3775a8cf1beSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
3785a8cf1beSShreyansh Jain 
3795a8cf1beSShreyansh Jain 	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
3805a8cf1beSShreyansh Jain 	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
3815a8cf1beSShreyansh Jain 	    RTE_PTYPE_L3_IPV4_EXT)) {
382a7c528e5SOlivier Matz 		ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
3835a8cf1beSShreyansh Jain 		ipv4_hdr->hdr_checksum = 0;
3845a8cf1beSShreyansh Jain 		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
3855a8cf1beSShreyansh Jain 	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
3865a8cf1beSShreyansh Jain 		   RTE_PTYPE_L3_IPV6) ||
3875a8cf1beSShreyansh Jain 		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
3885a8cf1beSShreyansh Jain 		   RTE_PTYPE_L3_IPV6_EXT))
389a7c528e5SOlivier Matz 		ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
3905a8cf1beSShreyansh Jain 
3915a8cf1beSShreyansh Jain 	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
392f41b5156SOlivier Matz 		struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr +
3935a8cf1beSShreyansh Jain 					  mbuf->l3_len);
3945a8cf1beSShreyansh Jain 		tcp_hdr->cksum = 0;
3950c9da755SDavid Marchand 		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
3965a8cf1beSShreyansh Jain 			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
3975a8cf1beSShreyansh Jain 							       tcp_hdr);
3980c9da755SDavid Marchand 		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
3995a8cf1beSShreyansh Jain 			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
4005a8cf1beSShreyansh Jain 							       tcp_hdr);
4015a8cf1beSShreyansh Jain 	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
4025a8cf1beSShreyansh Jain 		   RTE_PTYPE_L4_UDP) {
403e73e3547SOlivier Matz 		struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
4045a8cf1beSShreyansh Jain 							     mbuf->l3_len);
4055a8cf1beSShreyansh Jain 		udp_hdr->dgram_cksum = 0;
4060c9da755SDavid Marchand 		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
4075a8cf1beSShreyansh Jain 			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
4085a8cf1beSShreyansh Jain 								     udp_hdr);
4090c9da755SDavid Marchand 		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
4105a8cf1beSShreyansh Jain 			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
4115a8cf1beSShreyansh Jain 								     udp_hdr);
4125a8cf1beSShreyansh Jain 	}
4135a8cf1beSShreyansh Jain }
4145a8cf1beSShreyansh Jain 
4155a8cf1beSShreyansh Jain static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
4165a8cf1beSShreyansh Jain 					 struct qm_fd *fd, char *prs_buf)
4175a8cf1beSShreyansh Jain {
4185a8cf1beSShreyansh Jain 	struct dpaa_eth_parse_results_t *prs;
4195a8cf1beSShreyansh Jain 
4205a8cf1beSShreyansh Jain 	DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
4215a8cf1beSShreyansh Jain 
4225a8cf1beSShreyansh Jain 	prs = GET_TX_PRS(prs_buf);
4235a8cf1beSShreyansh Jain 	prs->l3r = 0;
4245a8cf1beSShreyansh Jain 	prs->l4r = 0;
4255a8cf1beSShreyansh Jain 	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
4265a8cf1beSShreyansh Jain 	   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
4275a8cf1beSShreyansh Jain 	   RTE_PTYPE_L3_IPV4_EXT))
4285a8cf1beSShreyansh Jain 		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
4295a8cf1beSShreyansh Jain 	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
4305a8cf1beSShreyansh Jain 		   RTE_PTYPE_L3_IPV6) ||
4315a8cf1beSShreyansh Jain 		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
4325a8cf1beSShreyansh Jain 		RTE_PTYPE_L3_IPV6_EXT))
4335a8cf1beSShreyansh Jain 		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
4345a8cf1beSShreyansh Jain 
4355a8cf1beSShreyansh Jain 	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
4365a8cf1beSShreyansh Jain 		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
4375a8cf1beSShreyansh Jain 	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
4385a8cf1beSShreyansh Jain 		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
4395a8cf1beSShreyansh Jain 
4405a8cf1beSShreyansh Jain 	prs->ip_off[0] = mbuf->l2_len;
4415a8cf1beSShreyansh Jain 	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
4425a8cf1beSShreyansh Jain 	/* Enable L3 (and L4, if TCP or UDP) HW checksum*/
443615352f5SVanshika Shukla 	fd->cmd |= DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
4445a8cf1beSShreyansh Jain }
4455a8cf1beSShreyansh Jain 
4465e0789e9SNipun Gupta static inline void
4475e0789e9SNipun Gupta dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
4485e0789e9SNipun Gupta {
4495e0789e9SNipun Gupta 	if (!mbuf->packet_type) {
4505e0789e9SNipun Gupta 		struct rte_net_hdr_lens hdr_lens;
4515e0789e9SNipun Gupta 
4525e0789e9SNipun Gupta 		mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
4535e0789e9SNipun Gupta 				RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
4545e0789e9SNipun Gupta 				| RTE_PTYPE_L4_MASK);
4555e0789e9SNipun Gupta 		mbuf->l2_len = hdr_lens.l2_len;
4565e0789e9SNipun Gupta 		mbuf->l3_len = hdr_lens.l3_len;
4575e0789e9SNipun Gupta 	}
4585e0789e9SNipun Gupta 	if (mbuf->data_off < (DEFAULT_TX_ICEOF +
4595e0789e9SNipun Gupta 	    sizeof(struct dpaa_eth_parse_results_t))) {
4605e0789e9SNipun Gupta 		DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
4615e0789e9SNipun Gupta 			"Not enough Headroom "
4625e0789e9SNipun Gupta 			"space for correct Checksum offload."
4635e0789e9SNipun Gupta 			"So Calculating checksum in Software.");
4645e0789e9SNipun Gupta 		dpaa_checksum(mbuf);
4655e0789e9SNipun Gupta 	} else {
4665e0789e9SNipun Gupta 		dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
4675e0789e9SNipun Gupta 	}
4685e0789e9SNipun Gupta }
4695e0789e9SNipun Gupta 
/*
 * Convert a scatter/gather frame descriptor into a chained mbuf.
 *
 * The FD points at a buffer holding the S/G table (SGT); each SGT entry
 * points at a data buffer whose mbuf metadata lives meta_data_size bytes
 * before the buffer start. The SGT buffer itself is freed once the chain
 * is built.
 *
 * @param fd    scatter/gather frame descriptor
 * @param ifid  interface id stored in each mbuf's port field
 * @return head of the mbuf chain, or NULL if the FD address cannot be
 *         translated to a virtual address.
 */
static struct rte_mbuf *
dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
	struct qm_sg_entry *sgt, *sg_temp;
	void *vaddr, *sg_vaddr;
	int i = 0;
	uint16_t fd_offset = fd->offset;

	vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
	if (!vaddr) {
		DPAA_PMD_ERR("unable to convert physical address");
		return NULL;
	}
	/* SGT starts at fd->offset inside the buffer the FD points at. */
	sgt = vaddr + fd_offset;
	sg_temp = &sgt[i++];
	hw_sg_to_cpu(sg_temp);
	/* temp is the mbuf owning the SGT buffer; freed after the walk. */
	temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
	sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));

	/* First SGT entry becomes the head mbuf of the chain. */
	first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
	first_seg->data_off = sg_temp->offset;
	first_seg->data_len = sg_temp->length;
	first_seg->pkt_len = sg_temp->length;
	rte_mbuf_refcnt_set(first_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
			(void **)&first_seg, 1, 1);
#endif

	first_seg->port = ifid;
	first_seg->nb_segs = 1;
	first_seg->ol_flags = 0;
	prev_seg = first_seg;
	/* Walk the remaining SGT entries until the "final" bit. */
	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
					     qm_sg_entry_get64(sg_temp));
		cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
		cur_seg->data_off = sg_temp->offset;
		cur_seg->data_len = sg_temp->length;
		first_seg->pkt_len += sg_temp->length;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(cur_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
				(void **)&cur_seg, 1, 1);
#endif
		prev_seg->next = cur_seg;
		if (sg_temp->final) {
			cur_seg->next = NULL;
			break;
		}
		prev_seg = cur_seg;
	}
	DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
			first_seg->pkt_len, first_seg->nb_segs);

	/* Annotations (parse results) sit in the SGT buffer, not the data
	 * buffers, so decode them before releasing it.
	 */
	dpaa_eth_packet_info(first_seg, vaddr);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
			(void **)&temp, 1, 1);
#endif
	rte_pktmbuf_free_seg(temp);

	return first_seg;
}
5418cffdcbeSShreyansh Jain 
/*
 * Convert a frame descriptor into an mbuf, in place: the mbuf metadata
 * lives meta_data_size bytes before the frame buffer, so no copy is
 * needed for contiguous frames. S/G frames are delegated to
 * dpaa_eth_sg_to_mbuf().
 *
 * @param fd    frame descriptor from the RX frame queue
 * @param ifid  interface id stored in the mbuf's port field
 * @return the reconstructed mbuf
 */
static inline struct rte_mbuf *
dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	void *ptr;
	uint8_t format =
		(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	uint16_t offset;
	uint32_t length;

	if (unlikely(format == qm_fd_sg))
		return dpaa_eth_sg_to_mbuf(fd, ifid);

	/* Unpack data offset and frame length from the opaque word. */
	offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
	length = fd->opaque & DPAA_FD_LENGTH_MASK;

	DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);

	/* Ignoring case when format != qm_fd_contig */
	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));

	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
	/* Prefetch the Parse results and packet data to L1 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

	mbuf->data_off = offset;
	mbuf->data_len = length;
	mbuf->pkt_len = length;

	mbuf->port = ifid;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
			(void **)&mbuf, 1, 1);
#endif
	/* Decode HW parse results from the annotation area in headroom. */
	dpaa_eth_packet_info(mbuf, mbuf->buf_addr);

	return mbuf;
}
58537f9b54bSShreyansh Jain 
/* Release the buffer(s) referenced by a frame descriptor back to their
 * mempools without building a full mbuf chain (used on TX confirm/error
 * paths). Handles both contiguous and scatter-gather FDs.
 * Returns 0 on success.
 * NOTE(review): the error path returns -1 from a uint16_t function, which
 * wraps to 0xFFFF — callers presumably just test for non-zero; confirm.
 */
uint16_t
dpaa_free_mbuf(const struct qm_fd *fd)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info;
	uint8_t format;
	void *ptr;

	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	if (unlikely(format == qm_fd_sg)) {
		struct rte_mbuf *first_seg, *cur_seg;
		struct qm_sg_entry *sgt, *sg_temp;
		void *vaddr, *sg_vaddr;
		int i = 0;
		uint16_t fd_offset = fd->offset;

		vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
		if (!vaddr) {
			DPAA_PMD_ERR("unable to convert physical address");
			return -1;
		}
		/* SG table starts fd->offset bytes into the first buffer. */
		sgt = vaddr + fd_offset;
		/* Entry 0 is the first data segment; its mbuf is freed last
		 * because the SG table itself lives in that buffer.
		 */
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
						qm_sg_entry_get64(sg_temp));
		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
		first_seg->nb_segs = 1;
		while (i < DPAA_SGT_MAX_ENTRIES) {
			sg_temp = &sgt[i++];
			hw_sg_to_cpu(sg_temp);
			/* bpid 0xFF marks a segment HW must not free (e.g.
			 * refcounted buffer) — skip it here as well.
			 */
			if (sg_temp->bpid != 0xFF) {
				bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
				sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
						qm_sg_entry_get64(sg_temp));
				cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
				rte_pktmbuf_free_seg(cur_seg);
			}
			if (sg_temp->final)
				break;
		}
		rte_pktmbuf_free_seg(first_seg);
		return 0;
	}

	/* Contiguous FD: recover the single mbuf and free it. */
	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

	rte_pktmbuf_free(mbuf);

	return 0;
}
6419124e65dSGagandeep Singh 
/* Specific for LS1043: burst RX callback that software-pipelines the
 * annotation prefetch (LS1043 has no HW annotation stashing). While
 * processing entry i, the annotation area of entry i+1 is prefetched so
 * it is warm in L1 by the next iteration.
 */
void
dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
	   void **bufs, int num_bufs)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info;
	const struct qm_fd *fd;
	void *ptr;
	struct dpaa_if *dpaa_intf;
	uint16_t offset, i;
	uint32_t length;
	uint8_t format;
	struct annotations_t *annot;

	/* Prime the pipeline: translate and prefetch entry 0 up front. */
	bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
	ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
	bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

	for (i = 0; i < num_bufs; i++) {
		/* Prefetch next entry's annotations one iteration ahead. */
		if (i < num_bufs - 1) {
			bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
			ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
			rte_prefetch0((void *)((uint8_t *)ptr +
					DEFAULT_RX_ICEOF));
			bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
					bp_info->meta_data_size);
		}

		fd = &dqrr[i]->fd;
		dpaa_intf = fq[0]->dpaa_intf;
		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;
		/* SG frames take the slower conversion path. */
		if (unlikely(format == qm_fd_sg)) {
			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
			continue;
		}

		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
				DPAA_FD_OFFSET_SHIFT;
		length = fd->opaque & DPAA_FD_LENGTH_MASK;

		mbuf = bufs[i];
		mbuf->data_off = offset;
		mbuf->data_len = length;
		mbuf->pkt_len = length;
		mbuf->port = dpaa_intf->ifid;

		mbuf->nb_segs = 1;
		mbuf->ol_flags = 0;
		mbuf->next = NULL;
		rte_mbuf_refcnt_set(mbuf, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
			(void **)&mbuf, 1, 1);
#endif
		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
		dpaa_display_frame_info(fd, fq[0]->fqid, true);
		/* IEEE 1588: latch the RX timestamp from the annotations. */
		if (dpaa_ieee_1588) {
			annot = GET_ANNOTATIONS(mbuf->buf_addr);
			dpaa_intf->rx_timestamp =
				rte_cpu_to_be_64(annot->timestamp);
		}
	}
}
708b9083ea5SNipun Gupta 
/* Burst RX callback for platforms with HW annotation stashing: converts
 * a batch of DQRR entries into mbufs. The mbuf pointers in bufs[] were
 * already resolved by dpaa_rx_cb_prepare(); only the headers are filled
 * here. SG frames fall back to dpaa_eth_sg_to_mbuf().
 */
void
dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
	   void **bufs, int num_bufs)
{
	struct rte_mbuf *mbuf;
	const struct qm_fd *fd;
	struct dpaa_if *dpaa_intf;
	uint16_t offset, i;
	uint32_t length;
	uint8_t format;
	struct annotations_t *annot;

	for (i = 0; i < num_bufs; i++) {
		fd = &dqrr[i]->fd;
		dpaa_intf = fq[0]->dpaa_intf;
		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;
		if (unlikely(format == qm_fd_sg)) {
			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
			continue;
		}

		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
				DPAA_FD_OFFSET_SHIFT;
		length = fd->opaque & DPAA_FD_LENGTH_MASK;

		mbuf = bufs[i];
		mbuf->data_off = offset;
		mbuf->data_len = length;
		mbuf->pkt_len = length;
		mbuf->port = dpaa_intf->ifid;

		mbuf->nb_segs = 1;
		mbuf->ol_flags = 0;
		mbuf->next = NULL;
		/* Buffer came straight from HW; re-arm the refcount. */
		rte_mbuf_refcnt_set(mbuf, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
			(void **)&mbuf, 1, 1);
#endif
		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
		dpaa_display_frame_info(fd, fq[0]->fqid, true);
		/* IEEE 1588: latch the RX timestamp from the annotations. */
		if (dpaa_ieee_1588) {
			annot = GET_ANNOTATIONS(mbuf->buf_addr);
			dpaa_intf->rx_timestamp =
				rte_cpu_to_be_64(annot->timestamp);
		}
	}
}
75819b4aba2SHemant Agrawal 
759b9083ea5SNipun Gupta void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
760b9083ea5SNipun Gupta {
761b9083ea5SNipun Gupta 	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
762b9083ea5SNipun Gupta 	void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
763b9083ea5SNipun Gupta 
764b9083ea5SNipun Gupta 	/* In case of LS1046, annotation stashing is disabled due to L2 cache
7657be78d02SJosh Soref 	 * being bottleneck in case of multicore scenario for this platform.
7667be78d02SJosh Soref 	 * So we prefetch the annotation beforehand, so that it is available
767b9083ea5SNipun Gupta 	 * in cache when accessed.
768b9083ea5SNipun Gupta 	 */
769b9083ea5SNipun Gupta 	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
770b9083ea5SNipun Gupta 
771b9083ea5SNipun Gupta 	*bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
7720c504f69SHemant Agrawal }
7730c504f69SHemant Agrawal 
7740c504f69SHemant Agrawal static uint16_t
7750c504f69SHemant Agrawal dpaa_eth_queue_portal_rx(struct qman_fq *fq,
7760c504f69SHemant Agrawal 			 struct rte_mbuf **bufs,
7770c504f69SHemant Agrawal 			 uint16_t nb_bufs)
7780c504f69SHemant Agrawal {
7790c504f69SHemant Agrawal 	int ret;
7800c504f69SHemant Agrawal 
781b9c94167SNipun Gupta 	if (unlikely(!fq->qp_initialized)) {
7820c504f69SHemant Agrawal 		ret = rte_dpaa_portal_fq_init((void *)0, fq);
7830c504f69SHemant Agrawal 		if (ret) {
7840c504f69SHemant Agrawal 			DPAA_PMD_ERR("Failure in affining portal %d", ret);
7850c504f69SHemant Agrawal 			return 0;
7860c504f69SHemant Agrawal 		}
787b9c94167SNipun Gupta 		fq->qp_initialized = 1;
7880c504f69SHemant Agrawal 	}
7890c504f69SHemant Agrawal 
7900c504f69SHemant Agrawal 	return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
7910c504f69SHemant Agrawal }
7920c504f69SHemant Agrawal 
7935e745593SSunil Kumar Kori enum qman_cb_dqrr_result
7945e745593SSunil Kumar Kori dpaa_rx_cb_parallel(void *event,
7955e745593SSunil Kumar Kori 		    struct qman_portal *qm __always_unused,
7965e745593SSunil Kumar Kori 		    struct qman_fq *fq,
7975e745593SSunil Kumar Kori 		    const struct qm_dqrr_entry *dqrr,
7985e745593SSunil Kumar Kori 		    void **bufs)
7995e745593SSunil Kumar Kori {
8005e745593SSunil Kumar Kori 	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
8015e745593SSunil Kumar Kori 	struct rte_mbuf *mbuf;
8025e745593SSunil Kumar Kori 	struct rte_event *ev = (struct rte_event *)event;
8035e745593SSunil Kumar Kori 
8045e745593SSunil Kumar Kori 	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
8055e745593SSunil Kumar Kori 	ev->event_ptr = (void *)mbuf;
8065e745593SSunil Kumar Kori 	ev->flow_id = fq->ev.flow_id;
8075e745593SSunil Kumar Kori 	ev->sub_event_type = fq->ev.sub_event_type;
8085e745593SSunil Kumar Kori 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
8095e745593SSunil Kumar Kori 	ev->op = RTE_EVENT_OP_NEW;
8105e745593SSunil Kumar Kori 	ev->sched_type = fq->ev.sched_type;
8115e745593SSunil Kumar Kori 	ev->queue_id = fq->ev.queue_id;
8125e745593SSunil Kumar Kori 	ev->priority = fq->ev.priority;
8135e745593SSunil Kumar Kori 	ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
814c9a1c2e5SDavid Marchand 	*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
8155e745593SSunil Kumar Kori 	*bufs = mbuf;
8165e745593SSunil Kumar Kori 
8175e745593SSunil Kumar Kori 	return qman_cb_dqrr_consume;
8185e745593SSunil Kumar Kori }
8195e745593SSunil Kumar Kori 
8205e745593SSunil Kumar Kori enum qman_cb_dqrr_result
8215e745593SSunil Kumar Kori dpaa_rx_cb_atomic(void *event,
8225e745593SSunil Kumar Kori 		  struct qman_portal *qm __always_unused,
8235e745593SSunil Kumar Kori 		  struct qman_fq *fq,
8245e745593SSunil Kumar Kori 		  const struct qm_dqrr_entry *dqrr,
8255e745593SSunil Kumar Kori 		  void **bufs)
8265e745593SSunil Kumar Kori {
8275e745593SSunil Kumar Kori 	u8 index;
8285e745593SSunil Kumar Kori 	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
8295e745593SSunil Kumar Kori 	struct rte_mbuf *mbuf;
8305e745593SSunil Kumar Kori 	struct rte_event *ev = (struct rte_event *)event;
8315e745593SSunil Kumar Kori 
8325e745593SSunil Kumar Kori 	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
8335e745593SSunil Kumar Kori 	ev->event_ptr = (void *)mbuf;
8345e745593SSunil Kumar Kori 	ev->flow_id = fq->ev.flow_id;
8355e745593SSunil Kumar Kori 	ev->sub_event_type = fq->ev.sub_event_type;
8365e745593SSunil Kumar Kori 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
8375e745593SSunil Kumar Kori 	ev->op = RTE_EVENT_OP_NEW;
8385e745593SSunil Kumar Kori 	ev->sched_type = fq->ev.sched_type;
8395e745593SSunil Kumar Kori 	ev->queue_id = fq->ev.queue_id;
8405e745593SSunil Kumar Kori 	ev->priority = fq->ev.priority;
8415e745593SSunil Kumar Kori 
8425e745593SSunil Kumar Kori 	/* Save active dqrr entries */
8435e745593SSunil Kumar Kori 	index = DQRR_PTR2IDX(dqrr);
8445e745593SSunil Kumar Kori 	DPAA_PER_LCORE_DQRR_SIZE++;
8455e745593SSunil Kumar Kori 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
8465e745593SSunil Kumar Kori 	DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
8475e745593SSunil Kumar Kori 	ev->impl_opaque = index + 1;
848c9a1c2e5SDavid Marchand 	*dpaa_seqn(mbuf) = (uint32_t)index + 1;
8495e745593SSunil Kumar Kori 	*bufs = mbuf;
8505e745593SSunil Kumar Kori 
8515e745593SSunil Kumar Kori 	return qman_cb_dqrr_defer;
8525e745593SSunil Kumar Kori }
8535e745593SSunil Kumar Kori 
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Debug-only: drain the interface's error frame queues, log each errored
 * frame's status, and free its buffers. Only runs for the first RXQ of a
 * non-shared MAC; shared-MAC error queues are owned by the kernel.
 */
static inline void
dpaa_eth_err_queue(struct qman_fq *fq)
{
	struct rte_mbuf *mbuf;
	struct qman_fq *debug_fq;
	int ret, i;
	struct qm_dqrr_entry *dq;
	struct qm_fd *fd;
	struct dpaa_if *dpaa_intf;

	dpaa_intf = fq->dpaa_intf;
	if (fq != &dpaa_intf->rx_queues[0]) {
		/* Associate error queues to the first RXQ.*/
		return;
	}

	if (dpaa_intf->cfg->fman_if->is_shared_mac) {
		/* Error queues of shared MAC are handled in kernel. */
		return;
	}

	/* Volatile dequeue requires a portal affined to this lcore. */
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return;
		}
	}
	for (i = 0; i < DPAA_DEBUG_FQ_MAX_NUM; i++) {
		debug_fq = &dpaa_intf->debug_queues[i];
		/* Request a volatile dequeue of exactly 4 frames. */
		ret = qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT);
		if (ret)
			return;

		/* Drain until the VDQCR command completes. */
		do {
			dq = qman_dequeue(debug_fq);
			if (!dq)
				continue;
			fd = &dq->fd;
			if (i == DPAA_DEBUG_FQ_RX_ERROR)
				DPAA_PMD_ERR("RX ERROR status: 0x%08x",
					fd->status);
			else
				DPAA_PMD_ERR("TX ERROR status: 0x%08x",
					fd->status);
			dpaa_display_frame_info(fd, debug_fq->fqid,
				i == DPAA_DEBUG_FQ_RX_ERROR);

			/* Recover the mbuf only to release its buffers. */
			mbuf = dpaa_eth_fd_to_mbuf(fd, dpaa_intf->ifid);
			rte_pktmbuf_free(mbuf);
			qman_dqrr_consume(debug_fq, dq);
		} while (debug_fq->flags & QMAN_FQ_STATE_VDQCR);
	}
}
#endif
91077393f56SSachin Saxena 
/* RX burst function (non-eventdev path). Static (portal-mode) queues are
 * serviced via dpaa_eth_queue_portal_rx(); otherwise a volatile dequeue
 * (VDQCR) of up to nb_bufs frames is issued and drained.
 * Returns the number of mbufs written to bufs[].
 */
uint16_t dpaa_eth_queue_rx(void *q,
			   struct rte_mbuf **bufs,
			   uint16_t nb_bufs)
{
	struct qman_fq *fq = q;
	struct qm_dqrr_entry *dq;
	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	int num_rx_bufs, ret;
	uint32_t vdqcr_flags = 0;
	struct annotations_t *annot;
	struct dpaa_if *dpaa_intf = fq->dpaa_intf;

	/* Secondary process: bpid table is shared through the FQ. */
	if (unlikely(rte_dpaa_bpid_info == NULL &&
				rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa_bpid_info = fq->bp_array;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dpaa_eth_err_queue(fq);
#endif

	if (likely(fq->is_static))
		return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);

	/* Affine a QMan portal to this lcore on first use. */
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	/* Until request for four buffers, we provide exact number of buffers.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
	 * requested, so we request two less in this case.
	 */
	if (nb_bufs < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_bufs;
	} else {
		num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	/* Drain until the volatile dequeue command completes. */
	do {
		dq = qman_dequeue(fq);
		if (!dq)
			continue;
		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
		dpaa_display_frame_info(&dq->fd, fq->fqid, true);
		/* IEEE 1588: latch the RX timestamp from the annotations. */
		if (dpaa_ieee_1588) {
			annot = GET_ANNOTATIONS(bufs[num_rx - 1]->buf_addr);
			dpaa_intf->rx_timestamp = rte_cpu_to_be_64(annot->timestamp);
		}
		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return num_rx;
}
97337f9b54bSShreyansh Jain 
974f191d5abSHemant Agrawal static int
9758cffdcbeSShreyansh Jain dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
9768716c0ecSGagandeep Singh 		struct qm_fd *fd,
9778716c0ecSGagandeep Singh 		struct dpaa_sw_buf_free *free_buf,
9788716c0ecSGagandeep Singh 		uint32_t *free_count,
9798716c0ecSGagandeep Singh 		uint32_t pkt_id)
9808cffdcbeSShreyansh Jain {
9818716c0ecSGagandeep Singh 	struct rte_mbuf *cur_seg = mbuf;
9828cffdcbeSShreyansh Jain 	struct rte_mbuf *temp, *mi;
9838cffdcbeSShreyansh Jain 	struct qm_sg_entry *sg_temp, *sgt;
9848cffdcbeSShreyansh Jain 	int i = 0;
9858cffdcbeSShreyansh Jain 
9868cffdcbeSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
9878cffdcbeSShreyansh Jain 
988533c31ccSGagandeep Singh 	temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool);
9898cffdcbeSShreyansh Jain 	if (!temp) {
9908cffdcbeSShreyansh Jain 		DPAA_PMD_ERR("Failure in allocation of mbuf");
9918cffdcbeSShreyansh Jain 		return -1;
9928cffdcbeSShreyansh Jain 	}
9938cffdcbeSShreyansh Jain 	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
9948cffdcbeSShreyansh Jain 				+ temp->data_off)) {
9958cffdcbeSShreyansh Jain 		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
9968cffdcbeSShreyansh Jain 		return -1;
9978cffdcbeSShreyansh Jain 	}
9988cffdcbeSShreyansh Jain 
999b0827a40SGagandeep Singh #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1000b0827a40SGagandeep Singh 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
1001b0827a40SGagandeep Singh 			(void **)&temp, 1, 0);
1002b0827a40SGagandeep Singh #endif
10038cffdcbeSShreyansh Jain 	fd->cmd = 0;
10048cffdcbeSShreyansh Jain 	fd->opaque_addr = 0;
10058cffdcbeSShreyansh Jain 
10068cffdcbeSShreyansh Jain 	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
1007d565c887SAshish Jain 		if (!mbuf->packet_type) {
1008d565c887SAshish Jain 			struct rte_net_hdr_lens hdr_lens;
1009d565c887SAshish Jain 
1010d565c887SAshish Jain 			mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
1011d565c887SAshish Jain 					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
1012d565c887SAshish Jain 					| RTE_PTYPE_L4_MASK);
1013d565c887SAshish Jain 			mbuf->l2_len = hdr_lens.l2_len;
1014d565c887SAshish Jain 			mbuf->l3_len = hdr_lens.l3_len;
1015d565c887SAshish Jain 		}
10168cffdcbeSShreyansh Jain 		if (temp->data_off < DEFAULT_TX_ICEOF
10178cffdcbeSShreyansh Jain 			+ sizeof(struct dpaa_eth_parse_results_t))
10188cffdcbeSShreyansh Jain 			temp->data_off = DEFAULT_TX_ICEOF
10198cffdcbeSShreyansh Jain 				+ sizeof(struct dpaa_eth_parse_results_t);
10208cffdcbeSShreyansh Jain 		dcbz_64(temp->buf_addr);
10218cffdcbeSShreyansh Jain 		dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
10228cffdcbeSShreyansh Jain 	}
10238cffdcbeSShreyansh Jain 
10248cffdcbeSShreyansh Jain 	sgt = temp->buf_addr + temp->data_off;
10258cffdcbeSShreyansh Jain 	fd->format = QM_FD_SG;
1026455da545SSantosh Shukla 	fd->addr = temp->buf_iova;
10278cffdcbeSShreyansh Jain 	fd->offset = temp->data_off;
1028533c31ccSGagandeep Singh 	fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool);
10298cffdcbeSShreyansh Jain 	fd->length20 = mbuf->pkt_len;
10308cffdcbeSShreyansh Jain 
10318cffdcbeSShreyansh Jain 	while (i < DPAA_SGT_MAX_ENTRIES) {
10328cffdcbeSShreyansh Jain 		sg_temp = &sgt[i++];
10338cffdcbeSShreyansh Jain 		sg_temp->opaque = 0;
10348cffdcbeSShreyansh Jain 		sg_temp->val = 0;
1035455da545SSantosh Shukla 		sg_temp->addr = cur_seg->buf_iova;
10368cffdcbeSShreyansh Jain 		sg_temp->offset = cur_seg->data_off;
10378cffdcbeSShreyansh Jain 		sg_temp->length = cur_seg->data_len;
10388cffdcbeSShreyansh Jain 		if (RTE_MBUF_DIRECT(cur_seg)) {
10398cffdcbeSShreyansh Jain 			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
10408cffdcbeSShreyansh Jain 				/*If refcnt > 1, invalid bpid is set to ensure
10418cffdcbeSShreyansh Jain 				 * buffer is not freed by HW.
10428cffdcbeSShreyansh Jain 				 */
10438cffdcbeSShreyansh Jain 				sg_temp->bpid = 0xff;
10448cffdcbeSShreyansh Jain 				rte_mbuf_refcnt_update(cur_seg, -1);
10458cffdcbeSShreyansh Jain 			} else {
10468cffdcbeSShreyansh Jain 				sg_temp->bpid =
10478cffdcbeSShreyansh Jain 					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
1048b0827a40SGagandeep Singh #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1049b0827a40SGagandeep Singh 				rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
1050b0827a40SGagandeep Singh 					(void **)&cur_seg, 1, 0);
1051b0827a40SGagandeep Singh #endif
10528cffdcbeSShreyansh Jain 			}
1053f191d5abSHemant Agrawal 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
10548716c0ecSGagandeep Singh 			free_buf[*free_count].seg = cur_seg;
10558716c0ecSGagandeep Singh 			free_buf[*free_count].pkt_id = pkt_id;
10568716c0ecSGagandeep Singh 			++*free_count;
1057f191d5abSHemant Agrawal 			sg_temp->bpid = 0xff;
10588cffdcbeSShreyansh Jain 		} else {
10598cffdcbeSShreyansh Jain 			/* Get owner MBUF from indirect buffer */
10608cffdcbeSShreyansh Jain 			mi = rte_mbuf_from_indirect(cur_seg);
10618cffdcbeSShreyansh Jain 			if (rte_mbuf_refcnt_read(mi) > 1) {
10628cffdcbeSShreyansh Jain 				/*If refcnt > 1, invalid bpid is set to ensure
10638cffdcbeSShreyansh Jain 				 * owner buffer is not freed by HW.
10648cffdcbeSShreyansh Jain 				 */
10658cffdcbeSShreyansh Jain 				sg_temp->bpid = 0xff;
10668cffdcbeSShreyansh Jain 			} else {
10678cffdcbeSShreyansh Jain 				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
10688cffdcbeSShreyansh Jain 				rte_mbuf_refcnt_update(mi, 1);
10698cffdcbeSShreyansh Jain 			}
10708716c0ecSGagandeep Singh 			free_buf[*free_count].seg = cur_seg;
10718716c0ecSGagandeep Singh 			free_buf[*free_count].pkt_id = pkt_id;
10728716c0ecSGagandeep Singh 			++*free_count;
10738cffdcbeSShreyansh Jain 		}
10748716c0ecSGagandeep Singh 		cur_seg = cur_seg->next;
10758cffdcbeSShreyansh Jain 		if (cur_seg == NULL) {
10768cffdcbeSShreyansh Jain 			sg_temp->final = 1;
10778cffdcbeSShreyansh Jain 			cpu_to_hw_sg(sg_temp);
10788cffdcbeSShreyansh Jain 			break;
10798cffdcbeSShreyansh Jain 		}
10808cffdcbeSShreyansh Jain 		cpu_to_hw_sg(sg_temp);
10818cffdcbeSShreyansh Jain 	}
10828cffdcbeSShreyansh Jain 	return 0;
10838cffdcbeSShreyansh Jain }
10848cffdcbeSShreyansh Jain 
/* Handle mbufs which are not segmented (non SG): build a contiguous TX
 * frame descriptor and decide buffer ownership. Buffers HW may free get
 * a real bpid; buffers SW must free (cloned, external, indirect) get the
 * invalid bpid 0xff and/or an entry in buf_to_free[].
 * @buf_to_free/@free_count: software-free list and its running count.
 * @pkt_id: packet index recorded with each deferred-free entry.
 */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
			    struct dpaa_bp_info *bp_info,
			    struct qm_fd *fd_arr,
			    struct dpaa_sw_buf_free *buf_to_free,
			    uint32_t *free_count,
			    uint32_t pkt_id)
{
	struct rte_mbuf *mi = NULL;

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* In case of direct mbuf and mbuf being cloned,
			 * BMAN should _not_ release buffer.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
			/* Buffer should be released by EAL */
			rte_mbuf_refcnt_update(mbuf, -1);
		} else {
			/* In case of direct mbuf and no cloning, mbuf can be
			 * released by BMAN.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
			rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
				(void **)&mbuf, 1, 0);
#endif
		}
	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
		/* External buffer: always software-freed after TX. */
		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
		DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
				bp_info ? bp_info->bpid : 0xff);
	} else {
		/* This is data-containing core mbuf: 'mi' */
		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1) {
			/* In case of indirect mbuf, and mbuf being cloned,
			 * BMAN should _not_ release it and let EAL release
			 * it through pktmbuf_free below.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
		} else {
			/* In case of indirect mbuf, and no cloning, core mbuf
			 * should be released by BMAN.
			 * Increase refcnt of core mbuf so that when
			 * pktmbuf_free is called and mbuf is released, EAL
			 * doesn't try to release core mbuf which would have
			 * been released by BMAN.
			 */
			rte_mbuf_refcnt_update(mi, 1);
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
						bp_info ? bp_info->bpid : 0xff);
		}
		/* The indirect wrapper itself is always software-freed. */
		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
	}

	/* Fill checksum-offload fields into the FD's annotation area. */
	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
		dpaa_unsegmented_checksum(mbuf, fd_arr);
}
114937f9b54bSShreyansh Jain 
115037f9b54bSShreyansh Jain /* Handle all mbufs on dpaa BMAN managed pool */
115137f9b54bSShreyansh Jain static inline uint16_t
115237f9b54bSShreyansh Jain tx_on_dpaa_pool(struct rte_mbuf *mbuf,
115337f9b54bSShreyansh Jain 		struct dpaa_bp_info *bp_info,
11548716c0ecSGagandeep Singh 		struct qm_fd *fd_arr,
11558716c0ecSGagandeep Singh 		struct dpaa_sw_buf_free *buf_to_free,
11568716c0ecSGagandeep Singh 		uint32_t *free_count,
11578716c0ecSGagandeep Singh 		uint32_t pkt_id)
115837f9b54bSShreyansh Jain {
115937f9b54bSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
116037f9b54bSShreyansh Jain 
116137f9b54bSShreyansh Jain 	if (mbuf->nb_segs == 1) {
116237f9b54bSShreyansh Jain 		/* Case for non-segmented buffers */
11638716c0ecSGagandeep Singh 		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr,
11648716c0ecSGagandeep Singh 				buf_to_free, free_count, pkt_id);
11658cffdcbeSShreyansh Jain 	} else if (mbuf->nb_segs > 1 &&
11668cffdcbeSShreyansh Jain 		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
11678716c0ecSGagandeep Singh 		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, buf_to_free,
11688716c0ecSGagandeep Singh 					   free_count, pkt_id)) {
11698cffdcbeSShreyansh Jain 			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
11708cffdcbeSShreyansh Jain 			return 1;
11718cffdcbeSShreyansh Jain 		}
117237f9b54bSShreyansh Jain 	} else {
117337f9b54bSShreyansh Jain 		DPAA_PMD_DEBUG("Number of Segments not supported");
117437f9b54bSShreyansh Jain 		return 1;
117537f9b54bSShreyansh Jain 	}
117637f9b54bSShreyansh Jain 
117737f9b54bSShreyansh Jain 	return 0;
117837f9b54bSShreyansh Jain }
117937f9b54bSShreyansh Jain 
118037f9b54bSShreyansh Jain /* Handle all mbufs on an external pool (non-dpaa) */
1181f8c7a17aSNipun Gupta static inline struct rte_mbuf *
1182f8c7a17aSNipun Gupta reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf)
118337f9b54bSShreyansh Jain {
118437f9b54bSShreyansh Jain 	struct dpaa_if *dpaa_intf = txq->dpaa_intf;
1185f8c7a17aSNipun Gupta 	struct dpaa_bp_info *bp_info = dpaa_intf->bp_info;
1186f8c7a17aSNipun Gupta 	struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0};
1187f8c7a17aSNipun Gupta 	struct rte_mbuf *temp_mbuf;
1188f8c7a17aSNipun Gupta 	int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0;
1189f8c7a17aSNipun Gupta 	uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0;
1190f8c7a17aSNipun Gupta 	char *data;
119137f9b54bSShreyansh Jain 
1192f8c7a17aSNipun Gupta 	DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer");
1193f8c7a17aSNipun Gupta 
1194f8c7a17aSNipun Gupta 	mbufs_size = bp_info->size -
1195f8c7a17aSNipun Gupta 		bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM;
1196f8c7a17aSNipun Gupta 	extra_seg = !!(mbuf->pkt_len % mbufs_size);
1197f8c7a17aSNipun Gupta 	num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg;
1198f8c7a17aSNipun Gupta 
1199f8c7a17aSNipun Gupta 	ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs);
1200f8c7a17aSNipun Gupta 	if (ret != 0) {
1201f8c7a17aSNipun Gupta 		DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed");
1202f8c7a17aSNipun Gupta 		return NULL;
120337f9b54bSShreyansh Jain 	}
120437f9b54bSShreyansh Jain 
1205f8c7a17aSNipun Gupta 	temp_mbuf = mbuf;
120637f9b54bSShreyansh Jain 
1207f8c7a17aSNipun Gupta 	while (temp_mbuf) {
1208f8c7a17aSNipun Gupta 		/* If mbuf data is less than new mbuf remaining memory */
1209f8c7a17aSNipun Gupta 		if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) {
1210f8c7a17aSNipun Gupta 			bytes_to_copy = temp_mbuf->data_len - offset1;
1211f8c7a17aSNipun Gupta 			mbuf_greater = -1;
1212f8c7a17aSNipun Gupta 		/* If mbuf data is greater than new mbuf remaining memory */
1213f8c7a17aSNipun Gupta 		} else if ((temp_mbuf->data_len - offset1) >
1214f8c7a17aSNipun Gupta 			   (mbufs_size - offset2)) {
1215f8c7a17aSNipun Gupta 			bytes_to_copy = mbufs_size - offset2;
1216f8c7a17aSNipun Gupta 			mbuf_greater = 1;
1217f8c7a17aSNipun Gupta 		/* if mbuf data is equal to new mbuf remaining memory */
1218f8c7a17aSNipun Gupta 		} else {
1219f8c7a17aSNipun Gupta 			bytes_to_copy = temp_mbuf->data_len - offset1;
1220f8c7a17aSNipun Gupta 			mbuf_greater = 0;
1221f8c7a17aSNipun Gupta 		}
1222f8c7a17aSNipun Gupta 
1223f8c7a17aSNipun Gupta 		/* Copy the data */
1224f8c7a17aSNipun Gupta 		data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy);
1225f8c7a17aSNipun Gupta 
1226*7594cafaSVanshika Shukla 		rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(temp_mbuf,
1227f8c7a17aSNipun Gupta 			   void *, offset1), bytes_to_copy);
1228f8c7a17aSNipun Gupta 
1229f8c7a17aSNipun Gupta 		/* Set new offsets and the temp buffers */
1230f8c7a17aSNipun Gupta 		if (mbuf_greater == -1) {
1231f8c7a17aSNipun Gupta 			offset1 = 0;
1232f8c7a17aSNipun Gupta 			offset2 += bytes_to_copy;
1233f8c7a17aSNipun Gupta 			temp_mbuf = temp_mbuf->next;
1234f8c7a17aSNipun Gupta 		} else if (mbuf_greater == 1) {
1235f8c7a17aSNipun Gupta 			offset2 = 0;
1236f8c7a17aSNipun Gupta 			offset1 += bytes_to_copy;
1237f8c7a17aSNipun Gupta 			new_mbufs[i]->next = new_mbufs[i + 1];
1238f8c7a17aSNipun Gupta 			new_mbufs[0]->nb_segs++;
1239f8c7a17aSNipun Gupta 			i++;
1240f8c7a17aSNipun Gupta 		} else {
1241f8c7a17aSNipun Gupta 			offset1 = 0;
1242f8c7a17aSNipun Gupta 			offset2 = 0;
1243f8c7a17aSNipun Gupta 			temp_mbuf = temp_mbuf->next;
1244f8c7a17aSNipun Gupta 			new_mbufs[i]->next = new_mbufs[i + 1];
1245f8c7a17aSNipun Gupta 			if (new_mbufs[i + 1])
1246f8c7a17aSNipun Gupta 				new_mbufs[0]->nb_segs++;
1247f8c7a17aSNipun Gupta 			i++;
1248f8c7a17aSNipun Gupta 		}
1249f8c7a17aSNipun Gupta 	}
1250f8c7a17aSNipun Gupta 
1251f8c7a17aSNipun Gupta 	/* Copy other required fields */
1252f8c7a17aSNipun Gupta 	new_mbufs[0]->ol_flags = mbuf->ol_flags;
1253f8c7a17aSNipun Gupta 	new_mbufs[0]->packet_type = mbuf->packet_type;
1254f8c7a17aSNipun Gupta 	new_mbufs[0]->tx_offload = mbuf->tx_offload;
1255f8c7a17aSNipun Gupta 
1256f8c7a17aSNipun Gupta 	rte_pktmbuf_free(mbuf);
1257f8c7a17aSNipun Gupta 
1258f8c7a17aSNipun Gupta 	return new_mbufs[0];
125937f9b54bSShreyansh Jain }
126037f9b54bSShreyansh Jain 
#ifdef RTE_LIBRTE_DPAA_ERRATA_LS1043_A010022
/* LS1043A erratum A-010022: FMAN can stall when the data offset is not
 * a multiple of 16 (and, for non-final segments, when the segment
 * length is not 16B aligned either). Report whether this chain must be
 * reallocated into properly aligned buffers.
 */
static inline int
dpaa_eth_ls1043a_mbuf_realloc(struct rte_mbuf *mbuf)
{
	const struct rte_mbuf *seg;
	uint64_t seg_len, seg_off;

	/* Errata applies to the LS1043A family only. */
	if (dpaa_svr_family != SVR_LS1043A_FAMILY)
		return 0;

	for (seg = mbuf; seg != NULL; seg = seg->next) {
		seg_len = seg->data_len;
		seg_off = seg->data_off;

		/* The offset must always be 16B aligned; the length
		 * alignment only matters for segments with a successor.
		 */
		if (!rte_is_aligned((void *)seg_off, 16) ||
		    (seg->next &&
		     !rte_is_aligned((void *)seg_len, 16))) {
			DPAA_PMD_DEBUG("Errata condition hit");

			return 1;
		}
	}
	return 0;
}
#endif
1289a978a7f6SJun Yang 
/* Burst-transmit entry point for a DPAA Tx queue.
 *
 * q       - qman_fq of the Tx queue (dpaa_if reachable through it)
 * bufs    - packets to transmit
 * nb_bufs - number of packets in bufs
 *
 * Returns the number of frames actually enqueued to QMan; the burst is
 * cut short on FD-conversion or reallocation failure.
 */
uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct rte_mbuf *mbuf, *mi = NULL;
	struct rte_mempool *mp;
	struct dpaa_bp_info *bp_info;
	struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
	uint32_t frames_to_send, loop, sent = 0;
	uint16_t state;
	int ret, realloc_mbuf = 0;
	uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
	/* Segments that software must free itself once the frame carrying
	 * them (identified by pkt_id) is known to have been enqueued.
	 */
	struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES];
	uint32_t free_count = 0;
	struct qman_fq *fq = q;
	struct dpaa_if *dpaa_intf = fq->dpaa_intf;
	struct qman_fq *fq_txconf = fq->tx_conf_queue;

	/* Lazily affine a QMan/BMan portal to this lcore. */
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

	/* IEEE 1588 mode: drain pending Tx confirmations and clear the
	 * stored timestamp before enqueueing this burst.
	 */
	if (dpaa_ieee_1588) {
		dpaa_intf->next_tx_conf_queue = fq_txconf;
		dpaa_eth_tx_conf(fq_txconf);
		dpaa_intf->tx_timestamp = 0;
	}

	while (nb_bufs) {
		frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
				DPAA_TX_BURST_SIZE : nb_bufs;
		for (loop = 0; loop < frames_to_send; loop++) {
			mbuf = *(bufs++);
			fd_arr[loop].cmd = 0;
			if (dpaa_ieee_1588) {
				/* Route the confirmation to fq_txconf and
				 * request timestamping for this frame.
				 */
				fd_arr[loop].cmd |= DPAA_FD_CMD_FCO |
					qman_fq_fqid(fq_txconf);
				fd_arr[loop].cmd |= DPAA_FD_CMD_RPD |
					DPAA_FD_CMD_UPD;
			}
#ifdef RTE_LIBRTE_DPAA_ERRATA_LS1043_A010022
			realloc_mbuf = dpaa_eth_ls1043a_mbuf_realloc(mbuf);
#endif
			/* Eventdev atomic path: if a DQRR entry is still held
			 * for this packet, release it via DCA on enqueue.
			 */
			seqn = *dpaa_seqn(mbuf);
			if (seqn != DPAA_INVALID_MBUF_SEQN) {
				index = seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					flags[loop] =
					   ((index & QM_EQCR_DCA_IDXMASK) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			if (likely(RTE_MBUF_DIRECT(mbuf))) {
				mp = mbuf->pool;
				bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
				/* Fast path: single-segment, non-cloned mbuf
				 * from a DPAA-managed pool, no errata realloc
				 * needed -> build a contiguous FD directly.
				 */
				if (likely(mp->ops_index ==
						bp_info->dpaa_ops_index &&
					mbuf->nb_segs == 1 &&
					realloc_mbuf == 0 &&
					rte_mbuf_refcnt_read(mbuf) == 1)) {
					DPAA_MBUF_TO_CONTIG_FD(mbuf,
						&fd_arr[loop], bp_info->bpid);
					if (mbuf->ol_flags &
						DPAA_TX_CKSUM_OFFLOAD_MASK)
						dpaa_unsegmented_checksum(mbuf,
							&fd_arr[loop]);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
				rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
						(void **)&mbuf, 1, 0);
#endif
					continue;
				}
			} else {
				/* Indirect mbuf: pool checks below apply to
				 * the direct (data-owning) mbuf's pool.
				 */
				mi = rte_mbuf_from_indirect(mbuf);
				mp = mi->pool;
			}

			/* External buffer: BMAN must not free it, so skip the
			 * pool lookup and go straight to FD construction.
			 */
			if (unlikely(RTE_MBUF_HAS_EXTBUF(mbuf))) {
				bp_info = NULL;
				goto indirect_buf;
			}

			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
			/* Copy into DPAA-pool buffers when the mbuf comes from
			 * a non-DPAA pool or the LS1043A errata was hit.
			 */
			if (unlikely(mp->ops_index != bp_info->dpaa_ops_index ||
				     realloc_mbuf == 1)) {
				struct rte_mbuf *temp_mbuf;

				temp_mbuf = reallocate_mbuf(q, mbuf);
				if (!temp_mbuf) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
				mbuf = temp_mbuf;
				realloc_mbuf = 0;
			}
indirect_buf:
			state = tx_on_dpaa_pool(mbuf, bp_info,
						&fd_arr[loop],
						buf_to_free,
						&free_count,
						loop);
			if (unlikely(state)) {
				/* Set frames_to_send & nb_bufs so
				 * that packets are transmitted till
				 * previous frame.
				 */
				frames_to_send = loop;
				nb_bufs = loop;
				goto send_pkts;
			}
		}

send_pkts:
		/* Retry until QMan has accepted the whole prepared burst. */
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi(q, &fd_arr[loop],
						   &flags[loop],
						   frames_to_send - loop);
		}
		nb_bufs -= frames_to_send;
		sent += frames_to_send;
	}

	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);

	/* Free software-owned segments belonging to frames that were
	 * actually enqueued (pkt_id below 'sent').
	 */
	for (loop = 0; loop < free_count; loop++) {
		if (buf_to_free[loop].pkt_id < sent)
			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
	}

	return sent;
}
143637f9b54bSShreyansh Jain 
/* Drain the Tx confirmation queue via volatile dequeue (VDQCR).
 *
 * For each confirmed frame: recover the mbuf from the FD, latch the
 * hardware Tx timestamp into dpaa_intf->tx_timestamp when the frame
 * requested IEEE 1588 timestamping, then release the buffer.
 *
 * q - qman_fq of the Tx confirmation queue.
 */
void
dpaa_eth_tx_conf(void *q)
{
	struct qman_fq *fq = q;
	struct qm_dqrr_entry *dq;
	int num_tx_conf, ret, dq_num;
	uint32_t vdqcr_flags = 0;
	struct dpaa_if *dpaa_intf = fq->dpaa_intf;
	struct qm_dqrr_entry *dqrr;
	struct dpaa_bp_info *bp_info;
	struct rte_mbuf *mbuf;
	void *ptr;
	struct annotations_t *annot;

	/* Secondary process: pick up the shared bpid table from the fq. */
	if (unlikely(rte_dpaa_bpid_info == NULL &&
				rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa_bpid_info = fq->bp_array;

	/* Lazily affine a QMan/BMan portal to this lcore. */
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return;
		}
	}

	num_tx_conf = DPAA_MAX_DEQUEUE_NUM_FRAMES - 2;

	/* Keep issuing volatile dequeue commands while they come back
	 * completely full (more confirmations may be pending).
	 */
	do {
		dq_num = 0;
		ret = qman_set_vdq(fq, num_tx_conf, vdqcr_flags);
		if (ret)
			return;
		do {
			dq = qman_dequeue(fq);
			if (!dq)
				continue;
			dqrr = dq;
			dq_num++;
			/* Recover the mbuf: FD carries the buffer address;
			 * the mbuf header sits meta_data_size bytes before it.
			 */
			bp_info = DPAA_BPID_TO_POOL_INFO(dqrr->fd.bpid);
			ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr->fd));
			rte_prefetch0((void *)((uint8_t *)ptr
						+ DEFAULT_RX_ICEOF));
			mbuf = (struct rte_mbuf *)
				((char *)ptr - bp_info->meta_data_size);

			/* Store the hardware Tx timestamp for frames that
			 * requested IEEE 1588 timestamping.
			 */
			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
				annot = GET_ANNOTATIONS(mbuf->buf_addr);
				dpaa_intf->tx_timestamp =
					rte_cpu_to_be_64(annot->timestamp);
			}
			dpaa_display_frame_info(&dq->fd, fq->fqid, true);
			qman_dqrr_consume(fq, dq);
			dpaa_free_mbuf(&dq->fd);
		} while (fq->flags & QMAN_FQ_STATE_VDQCR);
	} while (dq_num == num_tx_conf);
}
149458e0420fSVanshika Shukla 
14959124e65dSGagandeep Singh uint16_t
14969124e65dSGagandeep Singh dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
14979124e65dSGagandeep Singh {
14989124e65dSGagandeep Singh 	qman_ern_poll_free();
14999124e65dSGagandeep Singh 
15009124e65dSGagandeep Singh 	return dpaa_eth_queue_tx(q, bufs, nb_bufs);
15019124e65dSGagandeep Singh }
15029124e65dSGagandeep Singh 
150337f9b54bSShreyansh Jain uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,
150437f9b54bSShreyansh Jain 			      struct rte_mbuf **bufs __rte_unused,
150537f9b54bSShreyansh Jain 		uint16_t nb_bufs __rte_unused)
150637f9b54bSShreyansh Jain {
150737f9b54bSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Drop all packets");
150837f9b54bSShreyansh Jain 
150937f9b54bSShreyansh Jain 	/* Drop all incoming packets. No need to free packets here
151037f9b54bSShreyansh Jain 	 * because the rte_eth f/w frees up the packets through tx_buffer
151137f9b54bSShreyansh Jain 	 * callback in case this functions returns count less than nb_bufs
151237f9b54bSShreyansh Jain 	 */
151337f9b54bSShreyansh Jain 	return 0;
151437f9b54bSShreyansh Jain }
1515