xref: /dpdk/drivers/net/dpaa/dpaa_rxtx.c (revision 480ec5b43e51a426bf86759214b4a3b4a70ddb12)
1d81734caSHemant Agrawal /* SPDX-License-Identifier: BSD-3-Clause
237f9b54bSShreyansh Jain  *
337f9b54bSShreyansh Jain  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4615352f5SVanshika Shukla  *   Copyright 2017,2019-2024 NXP
537f9b54bSShreyansh Jain  *
637f9b54bSShreyansh Jain  */
737f9b54bSShreyansh Jain 
837f9b54bSShreyansh Jain /* System headers */
937f9b54bSShreyansh Jain #include <inttypes.h>
1037f9b54bSShreyansh Jain #include <unistd.h>
1137f9b54bSShreyansh Jain #include <stdio.h>
1237f9b54bSShreyansh Jain #include <limits.h>
1337f9b54bSShreyansh Jain #include <sched.h>
1437f9b54bSShreyansh Jain #include <pthread.h>
1537f9b54bSShreyansh Jain 
1637f9b54bSShreyansh Jain #include <rte_byteorder.h>
1737f9b54bSShreyansh Jain #include <rte_common.h>
1837f9b54bSShreyansh Jain #include <rte_interrupts.h>
1937f9b54bSShreyansh Jain #include <rte_log.h>
2037f9b54bSShreyansh Jain #include <rte_debug.h>
2137f9b54bSShreyansh Jain #include <rte_pci.h>
2237f9b54bSShreyansh Jain #include <rte_atomic.h>
2337f9b54bSShreyansh Jain #include <rte_branch_prediction.h>
2437f9b54bSShreyansh Jain #include <rte_memory.h>
2537f9b54bSShreyansh Jain #include <rte_tailq.h>
2637f9b54bSShreyansh Jain #include <rte_eal.h>
2737f9b54bSShreyansh Jain #include <rte_alarm.h>
2837f9b54bSShreyansh Jain #include <rte_ether.h>
29df96fd0dSBruce Richardson #include <ethdev_driver.h>
3037f9b54bSShreyansh Jain #include <rte_malloc.h>
3137f9b54bSShreyansh Jain #include <rte_ring.h>
3237f9b54bSShreyansh Jain #include <rte_ip.h>
3337f9b54bSShreyansh Jain #include <rte_tcp.h>
3437f9b54bSShreyansh Jain #include <rte_udp.h>
35d565c887SAshish Jain #include <rte_net.h>
365e745593SSunil Kumar Kori #include <rte_eventdev.h>
3737f9b54bSShreyansh Jain 
3837f9b54bSShreyansh Jain #include "dpaa_ethdev.h"
3937f9b54bSShreyansh Jain #include "dpaa_rxtx.h"
40a2f1da7dSDavid Marchand #include <bus_dpaa_driver.h>
4137f9b54bSShreyansh Jain #include <dpaa_mempool.h>
4237f9b54bSShreyansh Jain 
435e745593SSunil Kumar Kori #include <qman.h>
4437f9b54bSShreyansh Jain #include <fsl_usd.h>
4537f9b54bSShreyansh Jain #include <fsl_qman.h>
4637f9b54bSShreyansh Jain #include <fsl_bman.h>
478c83f28cSHemant Agrawal #include <dpaa_of.h>
4837f9b54bSShreyansh Jain #include <netcfg.h>
4937f9b54bSShreyansh Jain 
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* When non-zero, dpaa_display_frame_info() dumps every frame instead of
 * only frames with a non-zero FD status (i.e. errored frames).
 */
static int s_force_display_frm;
#endif
53*480ec5b4SHemant Agrawal 
/* Fill a contiguous (single-buffer) frame descriptor from an mbuf:
 * clear the FD, mark it as contiguous format, then encode the data
 * offset and packet length into the opaque word and record the buffer
 * IOVA and backing-pool id.
 */
#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
	do { \
		(_fd)->opaque_addr = 0; \
		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
		(_fd)->opaque |= (_mbuf)->pkt_len; \
		(_fd)->addr = (_mbuf)->buf_iova; \
		(_fd)->bpid = _bpid; \
	} while (0)
6337f9b54bSShreyansh Jain 
6477393f56SSachin Saxena #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Enable (set != 0) or disable dumping of non-errored frames by
 * dpaa_display_frame_info(); see s_force_display_frm.
 */
void
dpaa_force_display_frame_set(int set)
{
	s_force_display_frm = set;
}
70*480ec5b4SHemant Agrawal 
7177393f56SSachin Saxena #define DISPLAY_PRINT printf
72*480ec5b4SHemant Agrawal static void
73*480ec5b4SHemant Agrawal dpaa_display_frame_info(const struct qm_fd *fd,
7477393f56SSachin Saxena 	uint32_t fqid, bool rx)
7505ba55bcSShreyansh Jain {
76*480ec5b4SHemant Agrawal 	int pos, offset = 0;
77*480ec5b4SHemant Agrawal 	char *ptr, info[1024];
7877393f56SSachin Saxena 	struct annotations_t *annot = rte_dpaa_mem_ptov(fd->addr);
7977393f56SSachin Saxena 	uint8_t format;
80*480ec5b4SHemant Agrawal 	const struct dpaa_eth_parse_results_t *psr;
8105ba55bcSShreyansh Jain 
82*480ec5b4SHemant Agrawal 	if (!fd->status && !s_force_display_frm) {
83*480ec5b4SHemant Agrawal 		/* Do not display correct packets unless force display.*/
8477393f56SSachin Saxena 		return;
8505ba55bcSShreyansh Jain 	}
86*480ec5b4SHemant Agrawal 	psr = &annot->parse;
8777393f56SSachin Saxena 
88*480ec5b4SHemant Agrawal 	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
89*480ec5b4SHemant Agrawal 	if (format == qm_fd_contig)
90*480ec5b4SHemant Agrawal 		sprintf(info, "simple");
91*480ec5b4SHemant Agrawal 	else if (format == qm_fd_sg)
92*480ec5b4SHemant Agrawal 		sprintf(info, "sg");
93*480ec5b4SHemant Agrawal 	else
94*480ec5b4SHemant Agrawal 		sprintf(info, "unknown format(%d)", format);
9577393f56SSachin Saxena 
96*480ec5b4SHemant Agrawal 	DISPLAY_PRINT("%s: fqid=%08x, bpid=%d, phy addr=0x%lx ",
97*480ec5b4SHemant Agrawal 		rx ? "RX" : "TX", fqid, fd->bpid, (unsigned long)fd->addr);
98*480ec5b4SHemant Agrawal 	DISPLAY_PRINT("format=%s offset=%d, len=%d, stat=0x%x\r\n",
99*480ec5b4SHemant Agrawal 		info, fd->offset, fd->length20, fd->status);
10077393f56SSachin Saxena 	if (rx) {
101*480ec5b4SHemant Agrawal 		DISPLAY_PRINT("Display usual RX parser result:\r\n");
102*480ec5b4SHemant Agrawal 		if (psr->eth_frame_type == 0)
103*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "unicast");
104*480ec5b4SHemant Agrawal 		else if (psr->eth_frame_type == 1)
105*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "multicast");
106*480ec5b4SHemant Agrawal 		else if (psr->eth_frame_type == 3)
107*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "broadcast");
108*480ec5b4SHemant Agrawal 		else
109*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "unknown eth type(%d)",
110*480ec5b4SHemant Agrawal 				psr->eth_frame_type);
111*480ec5b4SHemant Agrawal 		if (psr->l2r_err) {
112*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], " L2 error(%d)",
113*480ec5b4SHemant Agrawal 				psr->l2r_err);
114*480ec5b4SHemant Agrawal 		} else {
115*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], " L2 non error");
11677393f56SSachin Saxena 		}
117*480ec5b4SHemant Agrawal 		DISPLAY_PRINT("L2: %s, %s, ethernet type:%s\r\n",
118*480ec5b4SHemant Agrawal 			psr->ethernet ? "is ethernet" : "non ethernet",
119*480ec5b4SHemant Agrawal 			psr->vlan ? "is vlan" : "non vlan", info);
120*480ec5b4SHemant Agrawal 
121*480ec5b4SHemant Agrawal 		offset = 0;
122*480ec5b4SHemant Agrawal 		DISPLAY_PRINT("L3: %s/%s, %s/%s, %s, %s\r\n",
123*480ec5b4SHemant Agrawal 			psr->first_ipv4 ? "first IPv4" : "non first IPv4",
124*480ec5b4SHemant Agrawal 			psr->last_ipv4 ? "last IPv4" : "non last IPv4",
125*480ec5b4SHemant Agrawal 			psr->first_ipv6 ? "first IPv6" : "non first IPv6",
126*480ec5b4SHemant Agrawal 			psr->last_ipv6 ? "last IPv6" : "non last IPv6",
127*480ec5b4SHemant Agrawal 			psr->gre ? "GRE" : "non GRE",
128*480ec5b4SHemant Agrawal 			psr->l3_err ? "L3 has error" : "L3 non error");
129*480ec5b4SHemant Agrawal 
130*480ec5b4SHemant Agrawal 		if (psr->l4_type == DPAA_PR_L4_TCP_TYPE) {
131*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "tcp");
132*480ec5b4SHemant Agrawal 		} else if (psr->l4_type == DPAA_PR_L4_UDP_TYPE) {
133*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "udp");
134*480ec5b4SHemant Agrawal 		} else if (psr->l4_type == DPAA_PR_L4_IPSEC_TYPE) {
135*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "IPSec ");
136*480ec5b4SHemant Agrawal 			if (psr->esp_sum)
137*480ec5b4SHemant Agrawal 				offset += sprintf(&info[offset], "ESP");
138*480ec5b4SHemant Agrawal 			if (psr->ah)
139*480ec5b4SHemant Agrawal 				offset += sprintf(&info[offset], "AH");
140*480ec5b4SHemant Agrawal 		} else if (psr->l4_type == DPAA_PR_L4_SCTP_TYPE) {
141*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "sctp");
142*480ec5b4SHemant Agrawal 		} else if (psr->l4_type == DPAA_PR_L4_DCCP_TYPE) {
143*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "dccp");
144*480ec5b4SHemant Agrawal 		} else {
145*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "unknown l4 type(%d)",
146*480ec5b4SHemant Agrawal 				psr->l4_type);
147*480ec5b4SHemant Agrawal 		}
148*480ec5b4SHemant Agrawal 		DISPLAY_PRINT("L4: type:%s, L4 validation %s\r\n",
149*480ec5b4SHemant Agrawal 			info, psr->l4cv ? "Performed" : "NOT performed");
150*480ec5b4SHemant Agrawal 
151*480ec5b4SHemant Agrawal 		offset = 0;
152*480ec5b4SHemant Agrawal 		if (psr->ethernet) {
153*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset],
154*480ec5b4SHemant Agrawal 				"Eth offset=%d, ethtype offset=%d, ",
155*480ec5b4SHemant Agrawal 				psr->eth_off, psr->etype_off);
156*480ec5b4SHemant Agrawal 		}
157*480ec5b4SHemant Agrawal 		if (psr->vlan) {
158*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "vLAN offset=%d, ",
159*480ec5b4SHemant Agrawal 				psr->vlan_off[0]);
160*480ec5b4SHemant Agrawal 		}
161*480ec5b4SHemant Agrawal 		if (psr->first_ipv4 || psr->first_ipv6) {
162*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "first IP offset=%d, ",
163*480ec5b4SHemant Agrawal 				psr->ip_off[0]);
164*480ec5b4SHemant Agrawal 		}
165*480ec5b4SHemant Agrawal 		if (psr->last_ipv4 || psr->last_ipv6) {
166*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "last IP offset=%d, ",
167*480ec5b4SHemant Agrawal 				psr->ip_off[1]);
168*480ec5b4SHemant Agrawal 		}
169*480ec5b4SHemant Agrawal 		if (psr->gre) {
170*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "GRE offset=%d, ",
171*480ec5b4SHemant Agrawal 				psr->gre_off);
172*480ec5b4SHemant Agrawal 		}
173*480ec5b4SHemant Agrawal 		if (psr->l4_type >= DPAA_PR_L4_TCP_TYPE) {
174*480ec5b4SHemant Agrawal 			offset += sprintf(&info[offset], "L4 offset=%d, ",
175*480ec5b4SHemant Agrawal 				psr->l4_off);
176*480ec5b4SHemant Agrawal 		}
177*480ec5b4SHemant Agrawal 		offset += sprintf(&info[offset], "Next HDR(0x%04x) offset=%d.",
178*480ec5b4SHemant Agrawal 			rte_be_to_cpu_16(psr->nxthdr), psr->nxthdr_off);
179*480ec5b4SHemant Agrawal 
180*480ec5b4SHemant Agrawal 		DISPLAY_PRINT("%s\r\n", info);
18177393f56SSachin Saxena 	}
18277393f56SSachin Saxena 
18377393f56SSachin Saxena 	if (unlikely(format == qm_fd_sg)) {
18477393f56SSachin Saxena 		/*TBD:S/G display: to be implemented*/
18577393f56SSachin Saxena 		return;
18677393f56SSachin Saxena 	}
18777393f56SSachin Saxena 
18877393f56SSachin Saxena 	DISPLAY_PRINT("Frame payload:\r\n");
18977393f56SSachin Saxena 	ptr = (char *)annot;
19077393f56SSachin Saxena 	ptr += fd->offset;
191*480ec5b4SHemant Agrawal 	for (pos = 0; pos < fd->length20; pos++) {
192*480ec5b4SHemant Agrawal 		DISPLAY_PRINT("%02x ", ptr[pos]);
193*480ec5b4SHemant Agrawal 		if (((pos + 1) % 16) == 0)
1940fcdbde0SHemant Agrawal 			DISPLAY_PRINT("\n");
19505ba55bcSShreyansh Jain 	}
19677393f56SSachin Saxena 	DISPLAY_PRINT("\n");
19777393f56SSachin Saxena }
198*480ec5b4SHemant Agrawal 
19905ba55bcSShreyansh Jain #else
20077393f56SSachin Saxena #define dpaa_display_frame_info(a, b, c)
20105ba55bcSShreyansh Jain #endif
20205ba55bcSShreyansh Jain 
203a350a954SHemant Agrawal static inline void
204a350a954SHemant Agrawal dpaa_slow_parsing(struct rte_mbuf *m,
205a350a954SHemant Agrawal 	const struct annotations_t *annot)
206a7bdc3bdSShreyansh Jain {
207a350a954SHemant Agrawal 	const struct dpaa_eth_parse_results_t *parse;
208a350a954SHemant Agrawal 
209a7bdc3bdSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Slow parsing");
210a350a954SHemant Agrawal 	parse = &annot->parse;
211a350a954SHemant Agrawal 
212a350a954SHemant Agrawal 	if (parse->ethernet)
213a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L2_ETHER;
214a350a954SHemant Agrawal 	if (parse->vlan)
215a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
216a350a954SHemant Agrawal 	if (parse->first_ipv4)
217a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV4;
218a350a954SHemant Agrawal 	if (parse->first_ipv6)
219a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV6;
220a350a954SHemant Agrawal 	if (parse->gre)
221a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_TUNNEL_GRE;
222a350a954SHemant Agrawal 	if (parse->last_ipv4)
223a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV4_EXT;
224a350a954SHemant Agrawal 	if (parse->last_ipv6)
225a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L3_IPV6_EXT;
226a350a954SHemant Agrawal 	if (parse->l4_type == DPAA_PR_L4_TCP_TYPE)
227a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L4_TCP;
228a350a954SHemant Agrawal 	else if (parse->l4_type == DPAA_PR_L4_UDP_TYPE)
229a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L4_UDP;
230a350a954SHemant Agrawal 	else if (parse->l4_type == DPAA_PR_L4_IPSEC_TYPE &&
231a350a954SHemant Agrawal 		!parse->l4_info_err && parse->esp_sum)
232a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
233a350a954SHemant Agrawal 	else if (parse->l4_type == DPAA_PR_L4_SCTP_TYPE)
234a350a954SHemant Agrawal 		m->packet_type |= RTE_PTYPE_L4_SCTP;
235a7bdc3bdSShreyansh Jain }
236a7bdc3bdSShreyansh Jain 
/*
 * Translate the hardware parse results attached to a received frame
 * into mbuf metadata: packet_type, ol_flags (checksum status, RSS,
 * VLAN, IEEE1588), tx_offload (L2/L3 header lengths) and the RSS hash.
 * Parse words that do not match a known pattern fall through to
 * dpaa_slow_parsing().
 */
static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
	/* Parse-results word masked down to the type-encoding bits */
	uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

	/* Assume HW-validated checksums; overridden below for the
	 * checksum-error parse patterns.
	 */
	m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD |
		RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	switch (prs) {
	case DPAA_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA_PKT_TYPE_IPV4_FRAG:
	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV6_FRAG:
	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPSEC_IPV4:
		/* Report ESP tunnel only when the ESP parse bit is set */
		if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK)
			m->packet_type = RTE_PTYPE_L2_ETHER |
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP;
		break;
	case DPAA_PKT_TYPE_IPSEC_IPV6:
		if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK)
			m->packet_type = RTE_PTYPE_L2_ETHER |
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	/* HW reported a bad checksum: replace the GOOD flags set above.
	 * Note packet_type is intentionally left unset for these cases.
	 */
	case DPAA_PKT_TYPE_IPV4_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_CSUM_ERR:
		m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_BAD;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR:
		m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_L4_CKSUM_BAD;
		break;
	case DPAA_PKT_TYPE_NONE:
		m->packet_type = 0;
		break;
	/* More switch cases can be added */
	default:
		dpaa_slow_parsing(m, annot);
	}

	/* Pack l2_len and l3_len into tx_offload from the parse offsets */
	m->tx_offload = annot->parse.ip_off[0];
	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
					<< DPAA_PKT_L3_LEN_SHIFT;

	/* Set the hash values */
	m->hash.rss = (uint32_t)(annot->hash);

	/* Check if Vlan is present */
	if (prs & DPAA_PARSE_VLAN_MASK)
		m->ol_flags |= RTE_MBUF_F_RX_VLAN;
	/* Packet received without stripping the vlan */

	/* PTP frames are flagged by ethertype for timestamping support */
	if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_1588)) {
		m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
		m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
	}
}
368a7bdc3bdSShreyansh Jain 
3695a8cf1beSShreyansh Jain static inline void dpaa_checksum(struct rte_mbuf *mbuf)
3705a8cf1beSShreyansh Jain {
3716d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_hdr =
3726d13ea8eSOlivier Matz 		rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
3735a8cf1beSShreyansh Jain 	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
374a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
375a7c528e5SOlivier Matz 	struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
3765a8cf1beSShreyansh Jain 
3775a8cf1beSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
3785a8cf1beSShreyansh Jain 
3795a8cf1beSShreyansh Jain 	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
3805a8cf1beSShreyansh Jain 	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
3815a8cf1beSShreyansh Jain 	    RTE_PTYPE_L3_IPV4_EXT)) {
382a7c528e5SOlivier Matz 		ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
3835a8cf1beSShreyansh Jain 		ipv4_hdr->hdr_checksum = 0;
3845a8cf1beSShreyansh Jain 		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
3855a8cf1beSShreyansh Jain 	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
3865a8cf1beSShreyansh Jain 		   RTE_PTYPE_L3_IPV6) ||
3875a8cf1beSShreyansh Jain 		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
3885a8cf1beSShreyansh Jain 		   RTE_PTYPE_L3_IPV6_EXT))
389a7c528e5SOlivier Matz 		ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
3905a8cf1beSShreyansh Jain 
3915a8cf1beSShreyansh Jain 	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
392f41b5156SOlivier Matz 		struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr +
3935a8cf1beSShreyansh Jain 					  mbuf->l3_len);
3945a8cf1beSShreyansh Jain 		tcp_hdr->cksum = 0;
3950c9da755SDavid Marchand 		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
3965a8cf1beSShreyansh Jain 			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
3975a8cf1beSShreyansh Jain 							       tcp_hdr);
3980c9da755SDavid Marchand 		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
3995a8cf1beSShreyansh Jain 			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
4005a8cf1beSShreyansh Jain 							       tcp_hdr);
4015a8cf1beSShreyansh Jain 	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
4025a8cf1beSShreyansh Jain 		   RTE_PTYPE_L4_UDP) {
403e73e3547SOlivier Matz 		struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
4045a8cf1beSShreyansh Jain 							     mbuf->l3_len);
4055a8cf1beSShreyansh Jain 		udp_hdr->dgram_cksum = 0;
4060c9da755SDavid Marchand 		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
4075a8cf1beSShreyansh Jain 			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
4085a8cf1beSShreyansh Jain 								     udp_hdr);
4090c9da755SDavid Marchand 		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
4105a8cf1beSShreyansh Jain 			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
4115a8cf1beSShreyansh Jain 								     udp_hdr);
4125a8cf1beSShreyansh Jain 	}
4135a8cf1beSShreyansh Jain }
4145a8cf1beSShreyansh Jain 
4155a8cf1beSShreyansh Jain static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
4165a8cf1beSShreyansh Jain 					 struct qm_fd *fd, char *prs_buf)
4175a8cf1beSShreyansh Jain {
4185a8cf1beSShreyansh Jain 	struct dpaa_eth_parse_results_t *prs;
4195a8cf1beSShreyansh Jain 
4205a8cf1beSShreyansh Jain 	DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
4215a8cf1beSShreyansh Jain 
4225a8cf1beSShreyansh Jain 	prs = GET_TX_PRS(prs_buf);
4235a8cf1beSShreyansh Jain 	prs->l3r = 0;
4245a8cf1beSShreyansh Jain 	prs->l4r = 0;
4255a8cf1beSShreyansh Jain 	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
4265a8cf1beSShreyansh Jain 	   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
4275a8cf1beSShreyansh Jain 	   RTE_PTYPE_L3_IPV4_EXT))
4285a8cf1beSShreyansh Jain 		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
4295a8cf1beSShreyansh Jain 	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
4305a8cf1beSShreyansh Jain 		   RTE_PTYPE_L3_IPV6) ||
4315a8cf1beSShreyansh Jain 		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
4325a8cf1beSShreyansh Jain 		RTE_PTYPE_L3_IPV6_EXT))
4335a8cf1beSShreyansh Jain 		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
4345a8cf1beSShreyansh Jain 
4355a8cf1beSShreyansh Jain 	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
4365a8cf1beSShreyansh Jain 		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
4375a8cf1beSShreyansh Jain 	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
4385a8cf1beSShreyansh Jain 		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
4395a8cf1beSShreyansh Jain 
4405a8cf1beSShreyansh Jain 	prs->ip_off[0] = mbuf->l2_len;
4415a8cf1beSShreyansh Jain 	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
4425a8cf1beSShreyansh Jain 	/* Enable L3 (and L4, if TCP or UDP) HW checksum*/
443615352f5SVanshika Shukla 	fd->cmd |= DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
4445a8cf1beSShreyansh Jain }
4455a8cf1beSShreyansh Jain 
4465e0789e9SNipun Gupta static inline void
4475e0789e9SNipun Gupta dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
4485e0789e9SNipun Gupta {
4495e0789e9SNipun Gupta 	if (!mbuf->packet_type) {
4505e0789e9SNipun Gupta 		struct rte_net_hdr_lens hdr_lens;
4515e0789e9SNipun Gupta 
4525e0789e9SNipun Gupta 		mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
4535e0789e9SNipun Gupta 				RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
4545e0789e9SNipun Gupta 				| RTE_PTYPE_L4_MASK);
4555e0789e9SNipun Gupta 		mbuf->l2_len = hdr_lens.l2_len;
4565e0789e9SNipun Gupta 		mbuf->l3_len = hdr_lens.l3_len;
4575e0789e9SNipun Gupta 	}
4585e0789e9SNipun Gupta 	if (mbuf->data_off < (DEFAULT_TX_ICEOF +
4595e0789e9SNipun Gupta 	    sizeof(struct dpaa_eth_parse_results_t))) {
4605e0789e9SNipun Gupta 		DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
4615e0789e9SNipun Gupta 			"Not enough Headroom "
4625e0789e9SNipun Gupta 			"space for correct Checksum offload."
4635e0789e9SNipun Gupta 			"So Calculating checksum in Software.");
4645e0789e9SNipun Gupta 		dpaa_checksum(mbuf);
4655e0789e9SNipun Gupta 	} else {
4665e0789e9SNipun Gupta 		dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
4675e0789e9SNipun Gupta 	}
4685e0789e9SNipun Gupta }
4695e0789e9SNipun Gupta 
/*
 * Reassemble a scatter/gather frame descriptor into a multi-segment
 * mbuf chain.  Each SG entry's buffer is mapped back to its mbuf header
 * via the pool metadata; the buffer holding the SG table itself is
 * freed once the chain is built.
 *
 * Returns the head mbuf, or NULL if the FD address cannot be mapped.
 */
static struct rte_mbuf *
dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
	struct qm_sg_entry *sgt, *sg_temp;
	void *vaddr, *sg_vaddr;
	int i = 0;
	uint16_t fd_offset = fd->offset;

	vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
	if (!vaddr) {
		DPAA_PMD_ERR("unable to convert physical address");
		return NULL;
	}
	/* The SG table sits at fd->offset within the FD's buffer */
	sgt = vaddr + fd_offset;
	sg_temp = &sgt[i++];
	hw_sg_to_cpu(sg_temp);
	/* mbuf header of the SG-table buffer, freed at the end */
	temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
	sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));

	/* The mbuf header sits meta_data_size bytes below the data buffer */
	first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
	first_seg->data_off = sg_temp->offset;
	first_seg->data_len = sg_temp->length;
	first_seg->pkt_len = sg_temp->length;
	rte_mbuf_refcnt_set(first_seg, 1);

	first_seg->port = ifid;
	first_seg->nb_segs = 1;
	first_seg->ol_flags = 0;
	prev_seg = first_seg;
	/* Walk the remaining SG entries until the "final" bit is seen */
	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
					     qm_sg_entry_get64(sg_temp));
		cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
		cur_seg->data_off = sg_temp->offset;
		cur_seg->data_len = sg_temp->length;
		first_seg->pkt_len += sg_temp->length;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(cur_seg, 1);
		prev_seg->next = cur_seg;
		if (sg_temp->final) {
			cur_seg->next = NULL;
			break;
		}
		prev_seg = cur_seg;
	}
	DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
			first_seg->pkt_len, first_seg->nb_segs);

	/* Parse results live in the SG-table buffer's annotation area */
	dpaa_eth_packet_info(first_seg, vaddr);
	rte_pktmbuf_free_seg(temp);

	return first_seg;
}
5298cffdcbeSShreyansh Jain 
5309ac71da4SNipun Gupta static inline struct rte_mbuf *
5319ac71da4SNipun Gupta dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
53237f9b54bSShreyansh Jain {
53337f9b54bSShreyansh Jain 	struct rte_mbuf *mbuf;
5349ac71da4SNipun Gupta 	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
53541c9ee8dSHemant Agrawal 	void *ptr;
5368cffdcbeSShreyansh Jain 	uint8_t format =
5378cffdcbeSShreyansh Jain 		(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
5389ac71da4SNipun Gupta 	uint16_t offset;
5399ac71da4SNipun Gupta 	uint32_t length;
54037f9b54bSShreyansh Jain 
5418cffdcbeSShreyansh Jain 	if (unlikely(format == qm_fd_sg))
5428cffdcbeSShreyansh Jain 		return dpaa_eth_sg_to_mbuf(fd, ifid);
5438cffdcbeSShreyansh Jain 
5449ac71da4SNipun Gupta 	offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
5459ac71da4SNipun Gupta 	length = fd->opaque & DPAA_FD_LENGTH_MASK;
5469ac71da4SNipun Gupta 
54755576ac2SHemant Agrawal 	DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);
54855576ac2SHemant Agrawal 
54937f9b54bSShreyansh Jain 	/* Ignoring case when format != qm_fd_contig */
5501ee09e39SHemant Agrawal 	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
55137f9b54bSShreyansh Jain 
55237f9b54bSShreyansh Jain 	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
5531ee09e39SHemant Agrawal 	/* Prefetch the Parse results and packet data to L1 */
5541ee09e39SHemant Agrawal 	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
55537f9b54bSShreyansh Jain 
55637f9b54bSShreyansh Jain 	mbuf->data_off = offset;
55737f9b54bSShreyansh Jain 	mbuf->data_len = length;
55837f9b54bSShreyansh Jain 	mbuf->pkt_len = length;
55937f9b54bSShreyansh Jain 
56037f9b54bSShreyansh Jain 	mbuf->port = ifid;
56137f9b54bSShreyansh Jain 	mbuf->nb_segs = 1;
56237f9b54bSShreyansh Jain 	mbuf->ol_flags = 0;
56337f9b54bSShreyansh Jain 	mbuf->next = NULL;
56437f9b54bSShreyansh Jain 	rte_mbuf_refcnt_set(mbuf, 1);
5650e5607e4SHemant Agrawal 	dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
56637f9b54bSShreyansh Jain 
56737f9b54bSShreyansh Jain 	return mbuf;
56837f9b54bSShreyansh Jain }
56937f9b54bSShreyansh Jain 
5709124e65dSGagandeep Singh uint16_t
5719124e65dSGagandeep Singh dpaa_free_mbuf(const struct qm_fd *fd)
5729124e65dSGagandeep Singh {
5739124e65dSGagandeep Singh 	struct rte_mbuf *mbuf;
5749124e65dSGagandeep Singh 	struct dpaa_bp_info *bp_info;
5759124e65dSGagandeep Singh 	uint8_t format;
5769124e65dSGagandeep Singh 	void *ptr;
5779124e65dSGagandeep Singh 
5789124e65dSGagandeep Singh 	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
5799124e65dSGagandeep Singh 	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
5809124e65dSGagandeep Singh 	if (unlikely(format == qm_fd_sg)) {
5810bf99a02SGagandeep Singh 		struct rte_mbuf *first_seg, *cur_seg;
5829124e65dSGagandeep Singh 		struct qm_sg_entry *sgt, *sg_temp;
5839124e65dSGagandeep Singh 		void *vaddr, *sg_vaddr;
5849124e65dSGagandeep Singh 		int i = 0;
5859124e65dSGagandeep Singh 		uint16_t fd_offset = fd->offset;
5869124e65dSGagandeep Singh 
5879124e65dSGagandeep Singh 		vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
5889124e65dSGagandeep Singh 		if (!vaddr) {
5899124e65dSGagandeep Singh 			DPAA_PMD_ERR("unable to convert physical address");
5909124e65dSGagandeep Singh 			return -1;
5919124e65dSGagandeep Singh 		}
5929124e65dSGagandeep Singh 		sgt = vaddr + fd_offset;
5939124e65dSGagandeep Singh 		sg_temp = &sgt[i++];
5949124e65dSGagandeep Singh 		hw_sg_to_cpu(sg_temp);
5959124e65dSGagandeep Singh 		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
5969124e65dSGagandeep Singh 						qm_sg_entry_get64(sg_temp));
5979124e65dSGagandeep Singh 		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
5989124e65dSGagandeep Singh 						bp_info->meta_data_size);
5999124e65dSGagandeep Singh 		first_seg->nb_segs = 1;
6009124e65dSGagandeep Singh 		while (i < DPAA_SGT_MAX_ENTRIES) {
6019124e65dSGagandeep Singh 			sg_temp = &sgt[i++];
6029124e65dSGagandeep Singh 			hw_sg_to_cpu(sg_temp);
6030bf99a02SGagandeep Singh 			if (sg_temp->bpid != 0xFF) {
6040bf99a02SGagandeep Singh 				bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid);
6059124e65dSGagandeep Singh 				sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
6069124e65dSGagandeep Singh 						qm_sg_entry_get64(sg_temp));
6079124e65dSGagandeep Singh 				cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
6089124e65dSGagandeep Singh 						      bp_info->meta_data_size);
6090bf99a02SGagandeep Singh 				rte_pktmbuf_free_seg(cur_seg);
6100bf99a02SGagandeep Singh 			}
6110bf99a02SGagandeep Singh 			if (sg_temp->final)
6129124e65dSGagandeep Singh 				break;
6139124e65dSGagandeep Singh 		}
6149124e65dSGagandeep Singh 		rte_pktmbuf_free_seg(first_seg);
6159124e65dSGagandeep Singh 		return 0;
6169124e65dSGagandeep Singh 	}
6179124e65dSGagandeep Singh 
6189124e65dSGagandeep Singh 	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
6199124e65dSGagandeep Singh 	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
6209124e65dSGagandeep Singh 
6219124e65dSGagandeep Singh 	rte_pktmbuf_free(mbuf);
6229124e65dSGagandeep Singh 
6239124e65dSGagandeep Singh 	return 0;
6249124e65dSGagandeep Singh }
6259124e65dSGagandeep Singh 
62619b4aba2SHemant Agrawal /* Specific for LS1043 */
627b9083ea5SNipun Gupta void
62819b4aba2SHemant Agrawal dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
629b9083ea5SNipun Gupta 	   void **bufs, int num_bufs)
6300c504f69SHemant Agrawal {
631b9083ea5SNipun Gupta 	struct rte_mbuf *mbuf;
632b9083ea5SNipun Gupta 	struct dpaa_bp_info *bp_info;
633b9083ea5SNipun Gupta 	const struct qm_fd *fd;
634b9083ea5SNipun Gupta 	void *ptr;
635b9083ea5SNipun Gupta 	struct dpaa_if *dpaa_intf;
636b9083ea5SNipun Gupta 	uint16_t offset, i;
637b9083ea5SNipun Gupta 	uint32_t length;
638b9083ea5SNipun Gupta 	uint8_t format;
639615352f5SVanshika Shukla 	struct annotations_t *annot;
6400c504f69SHemant Agrawal 
641b9083ea5SNipun Gupta 	bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
642b9083ea5SNipun Gupta 	ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
643b9083ea5SNipun Gupta 	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
64419b4aba2SHemant Agrawal 	bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
645b9083ea5SNipun Gupta 
646b9083ea5SNipun Gupta 	for (i = 0; i < num_bufs; i++) {
64719b4aba2SHemant Agrawal 		if (i < num_bufs - 1) {
648b9083ea5SNipun Gupta 			bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
649b9083ea5SNipun Gupta 			ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
650b9083ea5SNipun Gupta 			rte_prefetch0((void *)((uint8_t *)ptr +
651b9083ea5SNipun Gupta 					DEFAULT_RX_ICEOF));
652b9083ea5SNipun Gupta 			bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
653b9083ea5SNipun Gupta 					bp_info->meta_data_size);
654b9083ea5SNipun Gupta 		}
655b9083ea5SNipun Gupta 
656b9083ea5SNipun Gupta 		fd = &dqrr[i]->fd;
6579abdad12SHemant Agrawal 		dpaa_intf = fq[0]->dpaa_intf;
658b9083ea5SNipun Gupta 		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
659b9083ea5SNipun Gupta 				DPAA_FD_FORMAT_SHIFT;
660b9083ea5SNipun Gupta 		if (unlikely(format == qm_fd_sg)) {
661b9083ea5SNipun Gupta 			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
662b9083ea5SNipun Gupta 			continue;
663b9083ea5SNipun Gupta 		}
664b9083ea5SNipun Gupta 
665b9083ea5SNipun Gupta 		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
666b9083ea5SNipun Gupta 				DPAA_FD_OFFSET_SHIFT;
667b9083ea5SNipun Gupta 		length = fd->opaque & DPAA_FD_LENGTH_MASK;
668b9083ea5SNipun Gupta 
669b9083ea5SNipun Gupta 		mbuf = bufs[i];
670b9083ea5SNipun Gupta 		mbuf->data_off = offset;
671b9083ea5SNipun Gupta 		mbuf->data_len = length;
672b9083ea5SNipun Gupta 		mbuf->pkt_len = length;
673b9083ea5SNipun Gupta 		mbuf->port = dpaa_intf->ifid;
674b9083ea5SNipun Gupta 
675b9083ea5SNipun Gupta 		mbuf->nb_segs = 1;
676b9083ea5SNipun Gupta 		mbuf->ol_flags = 0;
677b9083ea5SNipun Gupta 		mbuf->next = NULL;
678b9083ea5SNipun Gupta 		rte_mbuf_refcnt_set(mbuf, 1);
6790e5607e4SHemant Agrawal 		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
68077393f56SSachin Saxena 		dpaa_display_frame_info(fd, fq[0]->fqid, true);
681615352f5SVanshika Shukla 		if (dpaa_ieee_1588) {
682615352f5SVanshika Shukla 			annot = GET_ANNOTATIONS(mbuf->buf_addr);
683615352f5SVanshika Shukla 			dpaa_intf->rx_timestamp =
684615352f5SVanshika Shukla 				rte_cpu_to_be_64(annot->timestamp);
685615352f5SVanshika Shukla 		}
686b9083ea5SNipun Gupta 	}
687b9083ea5SNipun Gupta }
688b9083ea5SNipun Gupta 
68919b4aba2SHemant Agrawal void
69019b4aba2SHemant Agrawal dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
69119b4aba2SHemant Agrawal 	   void **bufs, int num_bufs)
69219b4aba2SHemant Agrawal {
69319b4aba2SHemant Agrawal 	struct rte_mbuf *mbuf;
69419b4aba2SHemant Agrawal 	const struct qm_fd *fd;
69519b4aba2SHemant Agrawal 	struct dpaa_if *dpaa_intf;
69619b4aba2SHemant Agrawal 	uint16_t offset, i;
69719b4aba2SHemant Agrawal 	uint32_t length;
69819b4aba2SHemant Agrawal 	uint8_t format;
699615352f5SVanshika Shukla 	struct annotations_t *annot;
70019b4aba2SHemant Agrawal 
70119b4aba2SHemant Agrawal 	for (i = 0; i < num_bufs; i++) {
70219b4aba2SHemant Agrawal 		fd = &dqrr[i]->fd;
70319b4aba2SHemant Agrawal 		dpaa_intf = fq[0]->dpaa_intf;
70419b4aba2SHemant Agrawal 		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
70519b4aba2SHemant Agrawal 				DPAA_FD_FORMAT_SHIFT;
70619b4aba2SHemant Agrawal 		if (unlikely(format == qm_fd_sg)) {
70719b4aba2SHemant Agrawal 			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
70819b4aba2SHemant Agrawal 			continue;
70919b4aba2SHemant Agrawal 		}
71019b4aba2SHemant Agrawal 
71119b4aba2SHemant Agrawal 		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
71219b4aba2SHemant Agrawal 				DPAA_FD_OFFSET_SHIFT;
71319b4aba2SHemant Agrawal 		length = fd->opaque & DPAA_FD_LENGTH_MASK;
71419b4aba2SHemant Agrawal 
71519b4aba2SHemant Agrawal 		mbuf = bufs[i];
71619b4aba2SHemant Agrawal 		mbuf->data_off = offset;
71719b4aba2SHemant Agrawal 		mbuf->data_len = length;
71819b4aba2SHemant Agrawal 		mbuf->pkt_len = length;
71919b4aba2SHemant Agrawal 		mbuf->port = dpaa_intf->ifid;
72019b4aba2SHemant Agrawal 
72119b4aba2SHemant Agrawal 		mbuf->nb_segs = 1;
72219b4aba2SHemant Agrawal 		mbuf->ol_flags = 0;
72319b4aba2SHemant Agrawal 		mbuf->next = NULL;
72419b4aba2SHemant Agrawal 		rte_mbuf_refcnt_set(mbuf, 1);
72519b4aba2SHemant Agrawal 		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
72677393f56SSachin Saxena 		dpaa_display_frame_info(fd, fq[0]->fqid, true);
727615352f5SVanshika Shukla 		if (dpaa_ieee_1588) {
728615352f5SVanshika Shukla 			annot = GET_ANNOTATIONS(mbuf->buf_addr);
729615352f5SVanshika Shukla 			dpaa_intf->rx_timestamp =
730615352f5SVanshika Shukla 				rte_cpu_to_be_64(annot->timestamp);
731615352f5SVanshika Shukla 		}
73219b4aba2SHemant Agrawal 	}
73319b4aba2SHemant Agrawal }
73419b4aba2SHemant Agrawal 
735b9083ea5SNipun Gupta void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
736b9083ea5SNipun Gupta {
737b9083ea5SNipun Gupta 	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
738b9083ea5SNipun Gupta 	void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
739b9083ea5SNipun Gupta 
740b9083ea5SNipun Gupta 	/* In case of LS1046, annotation stashing is disabled due to L2 cache
7417be78d02SJosh Soref 	 * being bottleneck in case of multicore scenario for this platform.
7427be78d02SJosh Soref 	 * So we prefetch the annotation beforehand, so that it is available
743b9083ea5SNipun Gupta 	 * in cache when accessed.
744b9083ea5SNipun Gupta 	 */
745b9083ea5SNipun Gupta 	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
746b9083ea5SNipun Gupta 
747b9083ea5SNipun Gupta 	*bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
7480c504f69SHemant Agrawal }
7490c504f69SHemant Agrawal 
7500c504f69SHemant Agrawal static uint16_t
7510c504f69SHemant Agrawal dpaa_eth_queue_portal_rx(struct qman_fq *fq,
7520c504f69SHemant Agrawal 			 struct rte_mbuf **bufs,
7530c504f69SHemant Agrawal 			 uint16_t nb_bufs)
7540c504f69SHemant Agrawal {
7550c504f69SHemant Agrawal 	int ret;
7560c504f69SHemant Agrawal 
757b9c94167SNipun Gupta 	if (unlikely(!fq->qp_initialized)) {
7580c504f69SHemant Agrawal 		ret = rte_dpaa_portal_fq_init((void *)0, fq);
7590c504f69SHemant Agrawal 		if (ret) {
7600c504f69SHemant Agrawal 			DPAA_PMD_ERR("Failure in affining portal %d", ret);
7610c504f69SHemant Agrawal 			return 0;
7620c504f69SHemant Agrawal 		}
763b9c94167SNipun Gupta 		fq->qp_initialized = 1;
7640c504f69SHemant Agrawal 	}
7650c504f69SHemant Agrawal 
7660c504f69SHemant Agrawal 	return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
7670c504f69SHemant Agrawal }
7680c504f69SHemant Agrawal 
7695e745593SSunil Kumar Kori enum qman_cb_dqrr_result
7705e745593SSunil Kumar Kori dpaa_rx_cb_parallel(void *event,
7715e745593SSunil Kumar Kori 		    struct qman_portal *qm __always_unused,
7725e745593SSunil Kumar Kori 		    struct qman_fq *fq,
7735e745593SSunil Kumar Kori 		    const struct qm_dqrr_entry *dqrr,
7745e745593SSunil Kumar Kori 		    void **bufs)
7755e745593SSunil Kumar Kori {
7765e745593SSunil Kumar Kori 	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
7775e745593SSunil Kumar Kori 	struct rte_mbuf *mbuf;
7785e745593SSunil Kumar Kori 	struct rte_event *ev = (struct rte_event *)event;
7795e745593SSunil Kumar Kori 
7805e745593SSunil Kumar Kori 	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
7815e745593SSunil Kumar Kori 	ev->event_ptr = (void *)mbuf;
7825e745593SSunil Kumar Kori 	ev->flow_id = fq->ev.flow_id;
7835e745593SSunil Kumar Kori 	ev->sub_event_type = fq->ev.sub_event_type;
7845e745593SSunil Kumar Kori 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
7855e745593SSunil Kumar Kori 	ev->op = RTE_EVENT_OP_NEW;
7865e745593SSunil Kumar Kori 	ev->sched_type = fq->ev.sched_type;
7875e745593SSunil Kumar Kori 	ev->queue_id = fq->ev.queue_id;
7885e745593SSunil Kumar Kori 	ev->priority = fq->ev.priority;
7895e745593SSunil Kumar Kori 	ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
790c9a1c2e5SDavid Marchand 	*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
7915e745593SSunil Kumar Kori 	*bufs = mbuf;
7925e745593SSunil Kumar Kori 
7935e745593SSunil Kumar Kori 	return qman_cb_dqrr_consume;
7945e745593SSunil Kumar Kori }
7955e745593SSunil Kumar Kori 
7965e745593SSunil Kumar Kori enum qman_cb_dqrr_result
7975e745593SSunil Kumar Kori dpaa_rx_cb_atomic(void *event,
7985e745593SSunil Kumar Kori 		  struct qman_portal *qm __always_unused,
7995e745593SSunil Kumar Kori 		  struct qman_fq *fq,
8005e745593SSunil Kumar Kori 		  const struct qm_dqrr_entry *dqrr,
8015e745593SSunil Kumar Kori 		  void **bufs)
8025e745593SSunil Kumar Kori {
8035e745593SSunil Kumar Kori 	u8 index;
8045e745593SSunil Kumar Kori 	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
8055e745593SSunil Kumar Kori 	struct rte_mbuf *mbuf;
8065e745593SSunil Kumar Kori 	struct rte_event *ev = (struct rte_event *)event;
8075e745593SSunil Kumar Kori 
8085e745593SSunil Kumar Kori 	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
8095e745593SSunil Kumar Kori 	ev->event_ptr = (void *)mbuf;
8105e745593SSunil Kumar Kori 	ev->flow_id = fq->ev.flow_id;
8115e745593SSunil Kumar Kori 	ev->sub_event_type = fq->ev.sub_event_type;
8125e745593SSunil Kumar Kori 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
8135e745593SSunil Kumar Kori 	ev->op = RTE_EVENT_OP_NEW;
8145e745593SSunil Kumar Kori 	ev->sched_type = fq->ev.sched_type;
8155e745593SSunil Kumar Kori 	ev->queue_id = fq->ev.queue_id;
8165e745593SSunil Kumar Kori 	ev->priority = fq->ev.priority;
8175e745593SSunil Kumar Kori 
8185e745593SSunil Kumar Kori 	/* Save active dqrr entries */
8195e745593SSunil Kumar Kori 	index = DQRR_PTR2IDX(dqrr);
8205e745593SSunil Kumar Kori 	DPAA_PER_LCORE_DQRR_SIZE++;
8215e745593SSunil Kumar Kori 	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
8225e745593SSunil Kumar Kori 	DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
8235e745593SSunil Kumar Kori 	ev->impl_opaque = index + 1;
824c9a1c2e5SDavid Marchand 	*dpaa_seqn(mbuf) = (uint32_t)index + 1;
8255e745593SSunil Kumar Kori 	*bufs = mbuf;
8265e745593SSunil Kumar Kori 
8275e745593SSunil Kumar Kori 	return qman_cb_dqrr_defer;
8285e745593SSunil Kumar Kori }
8295e745593SSunil Kumar Kori 
83077393f56SSachin Saxena #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
8319e97abf2SJun Yang static inline void
8329e97abf2SJun Yang dpaa_eth_err_queue(struct qman_fq *fq)
83377393f56SSachin Saxena {
83477393f56SSachin Saxena 	struct rte_mbuf *mbuf;
83577393f56SSachin Saxena 	struct qman_fq *debug_fq;
83677393f56SSachin Saxena 	int ret, i;
83777393f56SSachin Saxena 	struct qm_dqrr_entry *dq;
83877393f56SSachin Saxena 	struct qm_fd *fd;
8399e97abf2SJun Yang 	struct dpaa_if *dpaa_intf;
8409e97abf2SJun Yang 
8419e97abf2SJun Yang 	dpaa_intf = fq->dpaa_intf;
8429e97abf2SJun Yang 	if (fq != &dpaa_intf->rx_queues[0]) {
8439e97abf2SJun Yang 		/* Associate error queues to the first RXQ.*/
8449e97abf2SJun Yang 		return;
8459e97abf2SJun Yang 	}
8469e97abf2SJun Yang 
8479e97abf2SJun Yang 	if (dpaa_intf->cfg->fman_if->is_shared_mac) {
8489e97abf2SJun Yang 		/* Error queues of shared MAC are handled in kernel. */
8499e97abf2SJun Yang 		return;
8509e97abf2SJun Yang 	}
85177393f56SSachin Saxena 
85277393f56SSachin Saxena 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
85377393f56SSachin Saxena 		ret = rte_dpaa_portal_init((void *)0);
85477393f56SSachin Saxena 		if (ret) {
85577393f56SSachin Saxena 			DPAA_PMD_ERR("Failure in affining portal");
85677393f56SSachin Saxena 			return;
85777393f56SSachin Saxena 		}
85877393f56SSachin Saxena 	}
8599e97abf2SJun Yang 	for (i = 0; i < DPAA_DEBUG_FQ_MAX_NUM; i++) {
86077393f56SSachin Saxena 		debug_fq = &dpaa_intf->debug_queues[i];
86177393f56SSachin Saxena 		ret = qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT);
86277393f56SSachin Saxena 		if (ret)
86377393f56SSachin Saxena 			return;
86477393f56SSachin Saxena 
86577393f56SSachin Saxena 		do {
86677393f56SSachin Saxena 			dq = qman_dequeue(debug_fq);
86777393f56SSachin Saxena 			if (!dq)
86877393f56SSachin Saxena 				continue;
86977393f56SSachin Saxena 			fd = &dq->fd;
87077393f56SSachin Saxena 			if (i == DPAA_DEBUG_FQ_RX_ERROR)
87177393f56SSachin Saxena 				DPAA_PMD_ERR("RX ERROR status: 0x%08x",
87277393f56SSachin Saxena 					fd->status);
87377393f56SSachin Saxena 			else
87477393f56SSachin Saxena 				DPAA_PMD_ERR("TX ERROR status: 0x%08x",
87577393f56SSachin Saxena 					fd->status);
87677393f56SSachin Saxena 			dpaa_display_frame_info(fd, debug_fq->fqid,
87777393f56SSachin Saxena 				i == DPAA_DEBUG_FQ_RX_ERROR);
87877393f56SSachin Saxena 
87977393f56SSachin Saxena 			mbuf = dpaa_eth_fd_to_mbuf(fd, dpaa_intf->ifid);
88077393f56SSachin Saxena 			rte_pktmbuf_free(mbuf);
88177393f56SSachin Saxena 			qman_dqrr_consume(debug_fq, dq);
88277393f56SSachin Saxena 		} while (debug_fq->flags & QMAN_FQ_STATE_VDQCR);
88377393f56SSachin Saxena 	}
88477393f56SSachin Saxena }
88577393f56SSachin Saxena #endif
88677393f56SSachin Saxena 
88737f9b54bSShreyansh Jain uint16_t dpaa_eth_queue_rx(void *q,
88837f9b54bSShreyansh Jain 			   struct rte_mbuf **bufs,
88937f9b54bSShreyansh Jain 			   uint16_t nb_bufs)
89037f9b54bSShreyansh Jain {
89137f9b54bSShreyansh Jain 	struct qman_fq *fq = q;
89237f9b54bSShreyansh Jain 	struct qm_dqrr_entry *dq;
89337f9b54bSShreyansh Jain 	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
894f40d5a53SNipun Gupta 	int num_rx_bufs, ret;
895f40d5a53SNipun Gupta 	uint32_t vdqcr_flags = 0;
896615352f5SVanshika Shukla 	struct annotations_t *annot;
897615352f5SVanshika Shukla 	struct dpaa_if *dpaa_intf = fq->dpaa_intf;
89837f9b54bSShreyansh Jain 
899e1797f4bSAkhil Goyal 	if (unlikely(rte_dpaa_bpid_info == NULL &&
900e1797f4bSAkhil Goyal 				rte_eal_process_type() == RTE_PROC_SECONDARY))
901e1797f4bSAkhil Goyal 		rte_dpaa_bpid_info = fq->bp_array;
902e1797f4bSAkhil Goyal 
90377393f56SSachin Saxena #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
9049e97abf2SJun Yang 	dpaa_eth_err_queue(fq);
90577393f56SSachin Saxena #endif
90677393f56SSachin Saxena 
9070c504f69SHemant Agrawal 	if (likely(fq->is_static))
9080c504f69SHemant Agrawal 		return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
9090c504f69SHemant Agrawal 
910e5872221SRohit Raj 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
91137f9b54bSShreyansh Jain 		ret = rte_dpaa_portal_init((void *)0);
91237f9b54bSShreyansh Jain 		if (ret) {
91337f9b54bSShreyansh Jain 			DPAA_PMD_ERR("Failure in affining portal");
91437f9b54bSShreyansh Jain 			return 0;
91537f9b54bSShreyansh Jain 		}
9165d944582SNipun Gupta 	}
91737f9b54bSShreyansh Jain 
918f40d5a53SNipun Gupta 	/* Until request for four buffers, we provide exact number of buffers.
919f40d5a53SNipun Gupta 	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
920f40d5a53SNipun Gupta 	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
921f40d5a53SNipun Gupta 	 * requested, so we request two less in this case.
922f40d5a53SNipun Gupta 	 */
923f40d5a53SNipun Gupta 	if (nb_bufs < 4) {
924f40d5a53SNipun Gupta 		vdqcr_flags = QM_VDQCR_EXACT;
925f40d5a53SNipun Gupta 		num_rx_bufs = nb_bufs;
926f40d5a53SNipun Gupta 	} else {
927f40d5a53SNipun Gupta 		num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
928f40d5a53SNipun Gupta 			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
929f40d5a53SNipun Gupta 	}
930f40d5a53SNipun Gupta 	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
93137f9b54bSShreyansh Jain 	if (ret)
93237f9b54bSShreyansh Jain 		return 0;
93337f9b54bSShreyansh Jain 
93437f9b54bSShreyansh Jain 	do {
93537f9b54bSShreyansh Jain 		dq = qman_dequeue(fq);
93637f9b54bSShreyansh Jain 		if (!dq)
93737f9b54bSShreyansh Jain 			continue;
93837f9b54bSShreyansh Jain 		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
93977393f56SSachin Saxena 		dpaa_display_frame_info(&dq->fd, fq->fqid, true);
940615352f5SVanshika Shukla 		if (dpaa_ieee_1588) {
941615352f5SVanshika Shukla 			annot = GET_ANNOTATIONS(bufs[num_rx - 1]->buf_addr);
942615352f5SVanshika Shukla 			dpaa_intf->rx_timestamp = rte_cpu_to_be_64(annot->timestamp);
943615352f5SVanshika Shukla 		}
94437f9b54bSShreyansh Jain 		qman_dqrr_consume(fq, dq);
94537f9b54bSShreyansh Jain 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
94637f9b54bSShreyansh Jain 
94737f9b54bSShreyansh Jain 	return num_rx;
94837f9b54bSShreyansh Jain }
94937f9b54bSShreyansh Jain 
950f191d5abSHemant Agrawal static int
9518cffdcbeSShreyansh Jain dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
9528716c0ecSGagandeep Singh 		struct qm_fd *fd,
9538716c0ecSGagandeep Singh 		struct dpaa_sw_buf_free *free_buf,
9548716c0ecSGagandeep Singh 		uint32_t *free_count,
9558716c0ecSGagandeep Singh 		uint32_t pkt_id)
9568cffdcbeSShreyansh Jain {
9578716c0ecSGagandeep Singh 	struct rte_mbuf *cur_seg = mbuf;
9588cffdcbeSShreyansh Jain 	struct rte_mbuf *temp, *mi;
9598cffdcbeSShreyansh Jain 	struct qm_sg_entry *sg_temp, *sgt;
9608cffdcbeSShreyansh Jain 	int i = 0;
9618cffdcbeSShreyansh Jain 
9628cffdcbeSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");
9638cffdcbeSShreyansh Jain 
964533c31ccSGagandeep Singh 	temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool);
9658cffdcbeSShreyansh Jain 	if (!temp) {
9668cffdcbeSShreyansh Jain 		DPAA_PMD_ERR("Failure in allocation of mbuf");
9678cffdcbeSShreyansh Jain 		return -1;
9688cffdcbeSShreyansh Jain 	}
9698cffdcbeSShreyansh Jain 	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
9708cffdcbeSShreyansh Jain 				+ temp->data_off)) {
9718cffdcbeSShreyansh Jain 		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
9728cffdcbeSShreyansh Jain 		return -1;
9738cffdcbeSShreyansh Jain 	}
9748cffdcbeSShreyansh Jain 
9758cffdcbeSShreyansh Jain 	fd->cmd = 0;
9768cffdcbeSShreyansh Jain 	fd->opaque_addr = 0;
9778cffdcbeSShreyansh Jain 
9788cffdcbeSShreyansh Jain 	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
979d565c887SAshish Jain 		if (!mbuf->packet_type) {
980d565c887SAshish Jain 			struct rte_net_hdr_lens hdr_lens;
981d565c887SAshish Jain 
982d565c887SAshish Jain 			mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
983d565c887SAshish Jain 					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
984d565c887SAshish Jain 					| RTE_PTYPE_L4_MASK);
985d565c887SAshish Jain 			mbuf->l2_len = hdr_lens.l2_len;
986d565c887SAshish Jain 			mbuf->l3_len = hdr_lens.l3_len;
987d565c887SAshish Jain 		}
9888cffdcbeSShreyansh Jain 		if (temp->data_off < DEFAULT_TX_ICEOF
9898cffdcbeSShreyansh Jain 			+ sizeof(struct dpaa_eth_parse_results_t))
9908cffdcbeSShreyansh Jain 			temp->data_off = DEFAULT_TX_ICEOF
9918cffdcbeSShreyansh Jain 				+ sizeof(struct dpaa_eth_parse_results_t);
9928cffdcbeSShreyansh Jain 		dcbz_64(temp->buf_addr);
9938cffdcbeSShreyansh Jain 		dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
9948cffdcbeSShreyansh Jain 	}
9958cffdcbeSShreyansh Jain 
9968cffdcbeSShreyansh Jain 	sgt = temp->buf_addr + temp->data_off;
9978cffdcbeSShreyansh Jain 	fd->format = QM_FD_SG;
998455da545SSantosh Shukla 	fd->addr = temp->buf_iova;
9998cffdcbeSShreyansh Jain 	fd->offset = temp->data_off;
1000533c31ccSGagandeep Singh 	fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool);
10018cffdcbeSShreyansh Jain 	fd->length20 = mbuf->pkt_len;
10028cffdcbeSShreyansh Jain 
10038cffdcbeSShreyansh Jain 	while (i < DPAA_SGT_MAX_ENTRIES) {
10048cffdcbeSShreyansh Jain 		sg_temp = &sgt[i++];
10058cffdcbeSShreyansh Jain 		sg_temp->opaque = 0;
10068cffdcbeSShreyansh Jain 		sg_temp->val = 0;
1007455da545SSantosh Shukla 		sg_temp->addr = cur_seg->buf_iova;
10088cffdcbeSShreyansh Jain 		sg_temp->offset = cur_seg->data_off;
10098cffdcbeSShreyansh Jain 		sg_temp->length = cur_seg->data_len;
10108cffdcbeSShreyansh Jain 		if (RTE_MBUF_DIRECT(cur_seg)) {
10118cffdcbeSShreyansh Jain 			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
10128cffdcbeSShreyansh Jain 				/*If refcnt > 1, invalid bpid is set to ensure
10138cffdcbeSShreyansh Jain 				 * buffer is not freed by HW.
10148cffdcbeSShreyansh Jain 				 */
10158cffdcbeSShreyansh Jain 				sg_temp->bpid = 0xff;
10168cffdcbeSShreyansh Jain 				rte_mbuf_refcnt_update(cur_seg, -1);
10178cffdcbeSShreyansh Jain 			} else {
10188cffdcbeSShreyansh Jain 				sg_temp->bpid =
10198cffdcbeSShreyansh Jain 					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
10208cffdcbeSShreyansh Jain 			}
1021f191d5abSHemant Agrawal 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
10228716c0ecSGagandeep Singh 			free_buf[*free_count].seg = cur_seg;
10238716c0ecSGagandeep Singh 			free_buf[*free_count].pkt_id = pkt_id;
10248716c0ecSGagandeep Singh 			++*free_count;
1025f191d5abSHemant Agrawal 			sg_temp->bpid = 0xff;
10268cffdcbeSShreyansh Jain 		} else {
10278cffdcbeSShreyansh Jain 			/* Get owner MBUF from indirect buffer */
10288cffdcbeSShreyansh Jain 			mi = rte_mbuf_from_indirect(cur_seg);
10298cffdcbeSShreyansh Jain 			if (rte_mbuf_refcnt_read(mi) > 1) {
10308cffdcbeSShreyansh Jain 				/*If refcnt > 1, invalid bpid is set to ensure
10318cffdcbeSShreyansh Jain 				 * owner buffer is not freed by HW.
10328cffdcbeSShreyansh Jain 				 */
10338cffdcbeSShreyansh Jain 				sg_temp->bpid = 0xff;
10348cffdcbeSShreyansh Jain 			} else {
10358cffdcbeSShreyansh Jain 				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
10368cffdcbeSShreyansh Jain 				rte_mbuf_refcnt_update(mi, 1);
10378cffdcbeSShreyansh Jain 			}
10388716c0ecSGagandeep Singh 			free_buf[*free_count].seg = cur_seg;
10398716c0ecSGagandeep Singh 			free_buf[*free_count].pkt_id = pkt_id;
10408716c0ecSGagandeep Singh 			++*free_count;
10418cffdcbeSShreyansh Jain 		}
10428716c0ecSGagandeep Singh 		cur_seg = cur_seg->next;
10438cffdcbeSShreyansh Jain 		if (cur_seg == NULL) {
10448cffdcbeSShreyansh Jain 			sg_temp->final = 1;
10458cffdcbeSShreyansh Jain 			cpu_to_hw_sg(sg_temp);
10468cffdcbeSShreyansh Jain 			break;
10478cffdcbeSShreyansh Jain 		}
10488cffdcbeSShreyansh Jain 		cpu_to_hw_sg(sg_temp);
10498cffdcbeSShreyansh Jain 	}
10508cffdcbeSShreyansh Jain 	return 0;
10518cffdcbeSShreyansh Jain }
10528cffdcbeSShreyansh Jain 
105337f9b54bSShreyansh Jain /* Handle mbufs which are not segmented (non SG) */
105437f9b54bSShreyansh Jain static inline void
105537f9b54bSShreyansh Jain tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
105637f9b54bSShreyansh Jain 			    struct dpaa_bp_info *bp_info,
10578716c0ecSGagandeep Singh 			    struct qm_fd *fd_arr,
10588716c0ecSGagandeep Singh 			    struct dpaa_sw_buf_free *buf_to_free,
10598716c0ecSGagandeep Singh 			    uint32_t *free_count,
10608716c0ecSGagandeep Singh 			    uint32_t pkt_id)
106137f9b54bSShreyansh Jain {
106237f9b54bSShreyansh Jain 	struct rte_mbuf *mi = NULL;
106337f9b54bSShreyansh Jain 
106437f9b54bSShreyansh Jain 	if (RTE_MBUF_DIRECT(mbuf)) {
106537f9b54bSShreyansh Jain 		if (rte_mbuf_refcnt_read(mbuf) > 1) {
106637f9b54bSShreyansh Jain 			/* In case of direct mbuf and mbuf being cloned,
106737f9b54bSShreyansh Jain 			 * BMAN should _not_ release buffer.
106837f9b54bSShreyansh Jain 			 */
106937f9b54bSShreyansh Jain 			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
107037f9b54bSShreyansh Jain 			/* Buffer should be releasd by EAL */
107137f9b54bSShreyansh Jain 			rte_mbuf_refcnt_update(mbuf, -1);
107237f9b54bSShreyansh Jain 		} else {
107337f9b54bSShreyansh Jain 			/* In case of direct mbuf and no cloning, mbuf can be
107437f9b54bSShreyansh Jain 			 * released by BMAN.
107537f9b54bSShreyansh Jain 			 */
107637f9b54bSShreyansh Jain 			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
107737f9b54bSShreyansh Jain 		}
1078f191d5abSHemant Agrawal 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
10798716c0ecSGagandeep Singh 		buf_to_free[*free_count].seg = mbuf;
10808716c0ecSGagandeep Singh 		buf_to_free[*free_count].pkt_id = pkt_id;
10818716c0ecSGagandeep Singh 		++*free_count;
1082f191d5abSHemant Agrawal 		DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
1083f191d5abSHemant Agrawal 				bp_info ? bp_info->bpid : 0xff);
108437f9b54bSShreyansh Jain 	} else {
108537f9b54bSShreyansh Jain 		/* This is data-containing core mbuf: 'mi' */
108637f9b54bSShreyansh Jain 		mi = rte_mbuf_from_indirect(mbuf);
108737f9b54bSShreyansh Jain 		if (rte_mbuf_refcnt_read(mi) > 1) {
108837f9b54bSShreyansh Jain 			/* In case of indirect mbuf, and mbuf being cloned,
108937f9b54bSShreyansh Jain 			 * BMAN should _not_ release it and let EAL release
109037f9b54bSShreyansh Jain 			 * it through pktmbuf_free below.
109137f9b54bSShreyansh Jain 			 */
109237f9b54bSShreyansh Jain 			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
109337f9b54bSShreyansh Jain 		} else {
109437f9b54bSShreyansh Jain 			/* In case of indirect mbuf, and no cloning, core mbuf
109537f9b54bSShreyansh Jain 			 * should be released by BMAN.
109637f9b54bSShreyansh Jain 			 * Increate refcnt of core mbuf so that when
109737f9b54bSShreyansh Jain 			 * pktmbuf_free is called and mbuf is released, EAL
109837f9b54bSShreyansh Jain 			 * doesn't try to release core mbuf which would have
109937f9b54bSShreyansh Jain 			 * been released by BMAN.
110037f9b54bSShreyansh Jain 			 */
110137f9b54bSShreyansh Jain 			rte_mbuf_refcnt_update(mi, 1);
1102f191d5abSHemant Agrawal 			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
1103f191d5abSHemant Agrawal 						bp_info ? bp_info->bpid : 0xff);
110437f9b54bSShreyansh Jain 		}
11058716c0ecSGagandeep Singh 		buf_to_free[*free_count].seg = mbuf;
11068716c0ecSGagandeep Singh 		buf_to_free[*free_count].pkt_id = pkt_id;
11078716c0ecSGagandeep Singh 		++*free_count;
110837f9b54bSShreyansh Jain 	}
11095a8cf1beSShreyansh Jain 
11105e0789e9SNipun Gupta 	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
11115e0789e9SNipun Gupta 		dpaa_unsegmented_checksum(mbuf, fd_arr);
111237f9b54bSShreyansh Jain }
111337f9b54bSShreyansh Jain 
111437f9b54bSShreyansh Jain /* Handle all mbufs on dpaa BMAN managed pool */
111537f9b54bSShreyansh Jain static inline uint16_t
111637f9b54bSShreyansh Jain tx_on_dpaa_pool(struct rte_mbuf *mbuf,
111737f9b54bSShreyansh Jain 		struct dpaa_bp_info *bp_info,
11188716c0ecSGagandeep Singh 		struct qm_fd *fd_arr,
11198716c0ecSGagandeep Singh 		struct dpaa_sw_buf_free *buf_to_free,
11208716c0ecSGagandeep Singh 		uint32_t *free_count,
11218716c0ecSGagandeep Singh 		uint32_t pkt_id)
112237f9b54bSShreyansh Jain {
112337f9b54bSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
112437f9b54bSShreyansh Jain 
112537f9b54bSShreyansh Jain 	if (mbuf->nb_segs == 1) {
112637f9b54bSShreyansh Jain 		/* Case for non-segmented buffers */
11278716c0ecSGagandeep Singh 		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr,
11288716c0ecSGagandeep Singh 				buf_to_free, free_count, pkt_id);
11298cffdcbeSShreyansh Jain 	} else if (mbuf->nb_segs > 1 &&
11308cffdcbeSShreyansh Jain 		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
11318716c0ecSGagandeep Singh 		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, buf_to_free,
11328716c0ecSGagandeep Singh 					   free_count, pkt_id)) {
11338cffdcbeSShreyansh Jain 			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
11348cffdcbeSShreyansh Jain 			return 1;
11358cffdcbeSShreyansh Jain 		}
113637f9b54bSShreyansh Jain 	} else {
113737f9b54bSShreyansh Jain 		DPAA_PMD_DEBUG("Number of Segments not supported");
113837f9b54bSShreyansh Jain 		return 1;
113937f9b54bSShreyansh Jain 	}
114037f9b54bSShreyansh Jain 
114137f9b54bSShreyansh Jain 	return 0;
114237f9b54bSShreyansh Jain }
114337f9b54bSShreyansh Jain 
114437f9b54bSShreyansh Jain /* Handle all mbufs on an external pool (non-dpaa) */
1145f8c7a17aSNipun Gupta static inline struct rte_mbuf *
1146f8c7a17aSNipun Gupta reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf)
114737f9b54bSShreyansh Jain {
114837f9b54bSShreyansh Jain 	struct dpaa_if *dpaa_intf = txq->dpaa_intf;
1149f8c7a17aSNipun Gupta 	struct dpaa_bp_info *bp_info = dpaa_intf->bp_info;
1150f8c7a17aSNipun Gupta 	struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0};
1151f8c7a17aSNipun Gupta 	struct rte_mbuf *temp_mbuf;
1152f8c7a17aSNipun Gupta 	int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0;
1153f8c7a17aSNipun Gupta 	uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0;
1154f8c7a17aSNipun Gupta 	char *data;
115537f9b54bSShreyansh Jain 
1156f8c7a17aSNipun Gupta 	DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer");
1157f8c7a17aSNipun Gupta 
1158f8c7a17aSNipun Gupta 	mbufs_size = bp_info->size -
1159f8c7a17aSNipun Gupta 		bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM;
1160f8c7a17aSNipun Gupta 	extra_seg = !!(mbuf->pkt_len % mbufs_size);
1161f8c7a17aSNipun Gupta 	num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg;
1162f8c7a17aSNipun Gupta 
1163f8c7a17aSNipun Gupta 	ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs);
1164f8c7a17aSNipun Gupta 	if (ret != 0) {
1165f8c7a17aSNipun Gupta 		DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed");
1166f8c7a17aSNipun Gupta 		return NULL;
116737f9b54bSShreyansh Jain 	}
116837f9b54bSShreyansh Jain 
1169f8c7a17aSNipun Gupta 	temp_mbuf = mbuf;
117037f9b54bSShreyansh Jain 
1171f8c7a17aSNipun Gupta 	while (temp_mbuf) {
1172f8c7a17aSNipun Gupta 		/* If mbuf data is less than new mbuf remaining memory */
1173f8c7a17aSNipun Gupta 		if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) {
1174f8c7a17aSNipun Gupta 			bytes_to_copy = temp_mbuf->data_len - offset1;
1175f8c7a17aSNipun Gupta 			mbuf_greater = -1;
1176f8c7a17aSNipun Gupta 		/* If mbuf data is greater than new mbuf remaining memory */
1177f8c7a17aSNipun Gupta 		} else if ((temp_mbuf->data_len - offset1) >
1178f8c7a17aSNipun Gupta 			   (mbufs_size - offset2)) {
1179f8c7a17aSNipun Gupta 			bytes_to_copy = mbufs_size - offset2;
1180f8c7a17aSNipun Gupta 			mbuf_greater = 1;
1181f8c7a17aSNipun Gupta 		/* if mbuf data is equal to new mbuf remaining memory */
1182f8c7a17aSNipun Gupta 		} else {
1183f8c7a17aSNipun Gupta 			bytes_to_copy = temp_mbuf->data_len - offset1;
1184f8c7a17aSNipun Gupta 			mbuf_greater = 0;
1185f8c7a17aSNipun Gupta 		}
1186f8c7a17aSNipun Gupta 
1187f8c7a17aSNipun Gupta 		/* Copy the data */
1188f8c7a17aSNipun Gupta 		data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy);
1189f8c7a17aSNipun Gupta 
1190f8c7a17aSNipun Gupta 		rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(mbuf,
1191f8c7a17aSNipun Gupta 			   void *, offset1), bytes_to_copy);
1192f8c7a17aSNipun Gupta 
1193f8c7a17aSNipun Gupta 		/* Set new offsets and the temp buffers */
1194f8c7a17aSNipun Gupta 		if (mbuf_greater == -1) {
1195f8c7a17aSNipun Gupta 			offset1 = 0;
1196f8c7a17aSNipun Gupta 			offset2 += bytes_to_copy;
1197f8c7a17aSNipun Gupta 			temp_mbuf = temp_mbuf->next;
1198f8c7a17aSNipun Gupta 		} else if (mbuf_greater == 1) {
1199f8c7a17aSNipun Gupta 			offset2 = 0;
1200f8c7a17aSNipun Gupta 			offset1 += bytes_to_copy;
1201f8c7a17aSNipun Gupta 			new_mbufs[i]->next = new_mbufs[i + 1];
1202f8c7a17aSNipun Gupta 			new_mbufs[0]->nb_segs++;
1203f8c7a17aSNipun Gupta 			i++;
1204f8c7a17aSNipun Gupta 		} else {
1205f8c7a17aSNipun Gupta 			offset1 = 0;
1206f8c7a17aSNipun Gupta 			offset2 = 0;
1207f8c7a17aSNipun Gupta 			temp_mbuf = temp_mbuf->next;
1208f8c7a17aSNipun Gupta 			new_mbufs[i]->next = new_mbufs[i + 1];
1209f8c7a17aSNipun Gupta 			if (new_mbufs[i + 1])
1210f8c7a17aSNipun Gupta 				new_mbufs[0]->nb_segs++;
1211f8c7a17aSNipun Gupta 			i++;
1212f8c7a17aSNipun Gupta 		}
1213f8c7a17aSNipun Gupta 	}
1214f8c7a17aSNipun Gupta 
1215f8c7a17aSNipun Gupta 	/* Copy other required fields */
1216f8c7a17aSNipun Gupta 	new_mbufs[0]->ol_flags = mbuf->ol_flags;
1217f8c7a17aSNipun Gupta 	new_mbufs[0]->packet_type = mbuf->packet_type;
1218f8c7a17aSNipun Gupta 	new_mbufs[0]->tx_offload = mbuf->tx_offload;
1219f8c7a17aSNipun Gupta 
1220f8c7a17aSNipun Gupta 	rte_pktmbuf_free(mbuf);
1221f8c7a17aSNipun Gupta 
1222f8c7a17aSNipun Gupta 	return new_mbufs[0];
122337f9b54bSShreyansh Jain }
122437f9b54bSShreyansh Jain 
122537f9b54bSShreyansh Jain uint16_t
122637f9b54bSShreyansh Jain dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
122737f9b54bSShreyansh Jain {
122837f9b54bSShreyansh Jain 	struct rte_mbuf *mbuf, *mi = NULL;
122937f9b54bSShreyansh Jain 	struct rte_mempool *mp;
123037f9b54bSShreyansh Jain 	struct dpaa_bp_info *bp_info;
1231b0a87fe2SNipun Gupta 	struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
12325e0789e9SNipun Gupta 	uint32_t frames_to_send, loop, sent = 0;
123337f9b54bSShreyansh Jain 	uint16_t state;
1234f8c7a17aSNipun Gupta 	int ret, realloc_mbuf = 0;
12355e745593SSunil Kumar Kori 	uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
12368716c0ecSGagandeep Singh 	struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES];
12378716c0ecSGagandeep Singh 	uint32_t free_count = 0;
123858e0420fSVanshika Shukla 	struct qman_fq *fq = q;
1239615352f5SVanshika Shukla 	struct dpaa_if *dpaa_intf = fq->dpaa_intf;
1240d11482d9SVanshika Shukla 	struct qman_fq *fq_txconf = fq->tx_conf_queue;
124137f9b54bSShreyansh Jain 
1242e5872221SRohit Raj 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
124337f9b54bSShreyansh Jain 		ret = rte_dpaa_portal_init((void *)0);
124437f9b54bSShreyansh Jain 		if (ret) {
124537f9b54bSShreyansh Jain 			DPAA_PMD_ERR("Failure in affining portal");
124637f9b54bSShreyansh Jain 			return 0;
124737f9b54bSShreyansh Jain 		}
12485d944582SNipun Gupta 	}
124937f9b54bSShreyansh Jain 
125037f9b54bSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
125137f9b54bSShreyansh Jain 
1252615352f5SVanshika Shukla 	if (dpaa_ieee_1588) {
1253615352f5SVanshika Shukla 		dpaa_intf->next_tx_conf_queue = fq_txconf;
1254615352f5SVanshika Shukla 		dpaa_eth_tx_conf(fq_txconf);
1255615352f5SVanshika Shukla 		dpaa_intf->tx_timestamp = 0;
1256615352f5SVanshika Shukla 	}
1257615352f5SVanshika Shukla 
125837f9b54bSShreyansh Jain 	while (nb_bufs) {
1259b0a87fe2SNipun Gupta 		frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
1260b0a87fe2SNipun Gupta 				DPAA_TX_BURST_SIZE : nb_bufs;
12615e0789e9SNipun Gupta 		for (loop = 0; loop < frames_to_send; loop++) {
12625e0789e9SNipun Gupta 			mbuf = *(bufs++);
1263f8c7a17aSNipun Gupta 			/* In case the data offset is not multiple of 16,
1264f8c7a17aSNipun Gupta 			 * FMAN can stall because of an errata. So reallocate
1265f8c7a17aSNipun Gupta 			 * the buffer in such case.
1266f8c7a17aSNipun Gupta 			 */
1267f8c7a17aSNipun Gupta 			if (dpaa_svr_family == SVR_LS1043A_FAMILY &&
126859267d7bSNipun Gupta 					(mbuf->data_off & 0x7F) != 0x0)
1269f8c7a17aSNipun Gupta 				realloc_mbuf = 1;
1270615352f5SVanshika Shukla 
1271615352f5SVanshika Shukla 			fd_arr[loop].cmd = 0;
1272615352f5SVanshika Shukla 			if (dpaa_ieee_1588) {
1273615352f5SVanshika Shukla 				fd_arr[loop].cmd |= DPAA_FD_CMD_FCO |
1274615352f5SVanshika Shukla 					qman_fq_fqid(fq_txconf);
1275615352f5SVanshika Shukla 				fd_arr[loop].cmd |= DPAA_FD_CMD_RPD |
1276615352f5SVanshika Shukla 					DPAA_FD_CMD_UPD;
1277615352f5SVanshika Shukla 			}
1278c9a1c2e5SDavid Marchand 			seqn = *dpaa_seqn(mbuf);
12799afce5aaSSunil Kumar Kori 			if (seqn != DPAA_INVALID_MBUF_SEQN) {
12809afce5aaSSunil Kumar Kori 				index = seqn - 1;
12819afce5aaSSunil Kumar Kori 				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
12829afce5aaSSunil Kumar Kori 					flags[loop] =
12839afce5aaSSunil Kumar Kori 					   ((index & QM_EQCR_DCA_IDXMASK) << 8);
12849afce5aaSSunil Kumar Kori 					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
12859afce5aaSSunil Kumar Kori 					DPAA_PER_LCORE_DQRR_SIZE--;
12869afce5aaSSunil Kumar Kori 					DPAA_PER_LCORE_DQRR_HELD &=
12879afce5aaSSunil Kumar Kori 								~(1 << index);
12889afce5aaSSunil Kumar Kori 				}
12899afce5aaSSunil Kumar Kori 			}
12909afce5aaSSunil Kumar Kori 
12915e0789e9SNipun Gupta 			if (likely(RTE_MBUF_DIRECT(mbuf))) {
129237f9b54bSShreyansh Jain 				mp = mbuf->pool;
12935e0789e9SNipun Gupta 				bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
12945e0789e9SNipun Gupta 				if (likely(mp->ops_index ==
12955e0789e9SNipun Gupta 						bp_info->dpaa_ops_index &&
12965e0789e9SNipun Gupta 					mbuf->nb_segs == 1 &&
1297f8c7a17aSNipun Gupta 					realloc_mbuf == 0 &&
12985e0789e9SNipun Gupta 					rte_mbuf_refcnt_read(mbuf) == 1)) {
12995e0789e9SNipun Gupta 					DPAA_MBUF_TO_CONTIG_FD(mbuf,
13005e0789e9SNipun Gupta 						&fd_arr[loop], bp_info->bpid);
13015e0789e9SNipun Gupta 					if (mbuf->ol_flags &
13025e0789e9SNipun Gupta 						DPAA_TX_CKSUM_OFFLOAD_MASK)
13035e0789e9SNipun Gupta 						dpaa_unsegmented_checksum(mbuf,
13045e0789e9SNipun Gupta 							&fd_arr[loop]);
13055e0789e9SNipun Gupta 					continue;
13065e0789e9SNipun Gupta 				}
130737f9b54bSShreyansh Jain 			} else {
130837f9b54bSShreyansh Jain 				mi = rte_mbuf_from_indirect(mbuf);
130937f9b54bSShreyansh Jain 				mp = mi->pool;
131037f9b54bSShreyansh Jain 			}
131137f9b54bSShreyansh Jain 
1312f191d5abSHemant Agrawal 			if (unlikely(RTE_MBUF_HAS_EXTBUF(mbuf))) {
1313f191d5abSHemant Agrawal 				bp_info = NULL;
1314f191d5abSHemant Agrawal 				goto indirect_buf;
1315f191d5abSHemant Agrawal 			}
1316f191d5abSHemant Agrawal 
131737f9b54bSShreyansh Jain 			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
1318f8c7a17aSNipun Gupta 			if (unlikely(mp->ops_index != bp_info->dpaa_ops_index ||
1319f8c7a17aSNipun Gupta 				     realloc_mbuf == 1)) {
1320f8c7a17aSNipun Gupta 				struct rte_mbuf *temp_mbuf;
1321f8c7a17aSNipun Gupta 
1322f8c7a17aSNipun Gupta 				temp_mbuf = reallocate_mbuf(q, mbuf);
1323f8c7a17aSNipun Gupta 				if (!temp_mbuf) {
1324f8c7a17aSNipun Gupta 					/* Set frames_to_send & nb_bufs so
1325f8c7a17aSNipun Gupta 					 * that packets are transmitted till
1326f8c7a17aSNipun Gupta 					 * previous frame.
1327f8c7a17aSNipun Gupta 					 */
1328f8c7a17aSNipun Gupta 					frames_to_send = loop;
1329f8c7a17aSNipun Gupta 					nb_bufs = loop;
1330f8c7a17aSNipun Gupta 					goto send_pkts;
1331f8c7a17aSNipun Gupta 				}
1332f8c7a17aSNipun Gupta 				mbuf = temp_mbuf;
1333f8c7a17aSNipun Gupta 				realloc_mbuf = 0;
1334f8c7a17aSNipun Gupta 			}
1335f191d5abSHemant Agrawal indirect_buf:
133637f9b54bSShreyansh Jain 			state = tx_on_dpaa_pool(mbuf, bp_info,
13378716c0ecSGagandeep Singh 						&fd_arr[loop],
13388716c0ecSGagandeep Singh 						buf_to_free,
13398716c0ecSGagandeep Singh 						&free_count,
13408716c0ecSGagandeep Singh 						loop);
134137f9b54bSShreyansh Jain 			if (unlikely(state)) {
134237f9b54bSShreyansh Jain 				/* Set frames_to_send & nb_bufs so
134337f9b54bSShreyansh Jain 				 * that packets are transmitted till
134437f9b54bSShreyansh Jain 				 * previous frame.
134537f9b54bSShreyansh Jain 				 */
134637f9b54bSShreyansh Jain 				frames_to_send = loop;
134737f9b54bSShreyansh Jain 				nb_bufs = loop;
134837f9b54bSShreyansh Jain 				goto send_pkts;
134937f9b54bSShreyansh Jain 			}
135037f9b54bSShreyansh Jain 		}
135137f9b54bSShreyansh Jain 
135237f9b54bSShreyansh Jain send_pkts:
135337f9b54bSShreyansh Jain 		loop = 0;
135437f9b54bSShreyansh Jain 		while (loop < frames_to_send) {
135537f9b54bSShreyansh Jain 			loop += qman_enqueue_multi(q, &fd_arr[loop],
13565e745593SSunil Kumar Kori 						   &flags[loop],
135737f9b54bSShreyansh Jain 						   frames_to_send - loop);
135837f9b54bSShreyansh Jain 		}
135937f9b54bSShreyansh Jain 		nb_bufs -= frames_to_send;
13605e0789e9SNipun Gupta 		sent += frames_to_send;
136137f9b54bSShreyansh Jain 	}
136237f9b54bSShreyansh Jain 
13635e0789e9SNipun Gupta 	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
136437f9b54bSShreyansh Jain 
13658716c0ecSGagandeep Singh 	for (loop = 0; loop < free_count; loop++) {
13668716c0ecSGagandeep Singh 		if (buf_to_free[loop].pkt_id < sent)
13678716c0ecSGagandeep Singh 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1368f191d5abSHemant Agrawal 	}
1369f191d5abSHemant Agrawal 
13705e0789e9SNipun Gupta 	return sent;
137137f9b54bSShreyansh Jain }
137237f9b54bSShreyansh Jain 
137358e0420fSVanshika Shukla void
137458e0420fSVanshika Shukla dpaa_eth_tx_conf(void *q)
137558e0420fSVanshika Shukla {
137658e0420fSVanshika Shukla 	struct qman_fq *fq = q;
137758e0420fSVanshika Shukla 	struct qm_dqrr_entry *dq;
137858e0420fSVanshika Shukla 	int num_tx_conf, ret, dq_num;
137958e0420fSVanshika Shukla 	uint32_t vdqcr_flags = 0;
1380615352f5SVanshika Shukla 	struct dpaa_if *dpaa_intf = fq->dpaa_intf;
1381615352f5SVanshika Shukla 	struct qm_dqrr_entry *dqrr;
1382615352f5SVanshika Shukla 	struct dpaa_bp_info *bp_info;
1383615352f5SVanshika Shukla 	struct rte_mbuf *mbuf;
1384615352f5SVanshika Shukla 	void *ptr;
1385615352f5SVanshika Shukla 	struct annotations_t *annot;
138658e0420fSVanshika Shukla 
138758e0420fSVanshika Shukla 	if (unlikely(rte_dpaa_bpid_info == NULL &&
138858e0420fSVanshika Shukla 				rte_eal_process_type() == RTE_PROC_SECONDARY))
138958e0420fSVanshika Shukla 		rte_dpaa_bpid_info = fq->bp_array;
139058e0420fSVanshika Shukla 
139158e0420fSVanshika Shukla 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
139258e0420fSVanshika Shukla 		ret = rte_dpaa_portal_init((void *)0);
139358e0420fSVanshika Shukla 		if (ret) {
139458e0420fSVanshika Shukla 			DPAA_PMD_ERR("Failure in affining portal");
139558e0420fSVanshika Shukla 			return;
139658e0420fSVanshika Shukla 		}
139758e0420fSVanshika Shukla 	}
139858e0420fSVanshika Shukla 
139958e0420fSVanshika Shukla 	num_tx_conf = DPAA_MAX_DEQUEUE_NUM_FRAMES - 2;
140058e0420fSVanshika Shukla 
140158e0420fSVanshika Shukla 	do {
140258e0420fSVanshika Shukla 		dq_num = 0;
140358e0420fSVanshika Shukla 		ret = qman_set_vdq(fq, num_tx_conf, vdqcr_flags);
140458e0420fSVanshika Shukla 		if (ret)
140558e0420fSVanshika Shukla 			return;
140658e0420fSVanshika Shukla 		do {
140758e0420fSVanshika Shukla 			dq = qman_dequeue(fq);
140858e0420fSVanshika Shukla 			if (!dq)
140958e0420fSVanshika Shukla 				continue;
1410615352f5SVanshika Shukla 			dqrr = dq;
141158e0420fSVanshika Shukla 			dq_num++;
1412615352f5SVanshika Shukla 			bp_info = DPAA_BPID_TO_POOL_INFO(dqrr->fd.bpid);
1413615352f5SVanshika Shukla 			ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr->fd));
1414615352f5SVanshika Shukla 			rte_prefetch0((void *)((uint8_t *)ptr
1415615352f5SVanshika Shukla 						+ DEFAULT_RX_ICEOF));
1416615352f5SVanshika Shukla 			mbuf = (struct rte_mbuf *)
1417615352f5SVanshika Shukla 				((char *)ptr - bp_info->meta_data_size);
1418615352f5SVanshika Shukla 
1419615352f5SVanshika Shukla 			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1420615352f5SVanshika Shukla 				annot = GET_ANNOTATIONS(mbuf->buf_addr);
1421615352f5SVanshika Shukla 				dpaa_intf->tx_timestamp =
1422615352f5SVanshika Shukla 					rte_cpu_to_be_64(annot->timestamp);
1423615352f5SVanshika Shukla 			}
142458e0420fSVanshika Shukla 			dpaa_display_frame_info(&dq->fd, fq->fqid, true);
142558e0420fSVanshika Shukla 			qman_dqrr_consume(fq, dq);
142658e0420fSVanshika Shukla 			dpaa_free_mbuf(&dq->fd);
142758e0420fSVanshika Shukla 		} while (fq->flags & QMAN_FQ_STATE_VDQCR);
142858e0420fSVanshika Shukla 	} while (dq_num == num_tx_conf);
142958e0420fSVanshika Shukla }
143058e0420fSVanshika Shukla 
14319124e65dSGagandeep Singh uint16_t
14329124e65dSGagandeep Singh dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
14339124e65dSGagandeep Singh {
14349124e65dSGagandeep Singh 	qman_ern_poll_free();
14359124e65dSGagandeep Singh 
14369124e65dSGagandeep Singh 	return dpaa_eth_queue_tx(q, bufs, nb_bufs);
14379124e65dSGagandeep Singh }
14389124e65dSGagandeep Singh 
143937f9b54bSShreyansh Jain uint16_t dpaa_eth_tx_drop_all(void *q  __rte_unused,
144037f9b54bSShreyansh Jain 			      struct rte_mbuf **bufs __rte_unused,
144137f9b54bSShreyansh Jain 		uint16_t nb_bufs __rte_unused)
144237f9b54bSShreyansh Jain {
144337f9b54bSShreyansh Jain 	DPAA_DP_LOG(DEBUG, "Drop all packets");
144437f9b54bSShreyansh Jain 
144537f9b54bSShreyansh Jain 	/* Drop all incoming packets. No need to free packets here
144637f9b54bSShreyansh Jain 	 * because the rte_eth f/w frees up the packets through tx_buffer
144737f9b54bSShreyansh Jain 	 * callback in case this functions returns count less than nb_bufs
144837f9b54bSShreyansh Jain 	 */
144937f9b54bSShreyansh Jain 	return 0;
145037f9b54bSShreyansh Jain }
1451