xref: /dpdk/drivers/net/dpaa2/dpaa2_rxtx.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_hexdump.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;

static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

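/* Illustrative sketch (not part of the driver): the dynamic timestamp
 * field offset and flag used above are assumed to be registered once at
 * PMD init, e.g. via rte_mbuf_dyn_rx_timestamp_register(). An application
 * consuming mbufs from this PMD could read the timestamp roughly as:
 *
 *	if (mbuf->ol_flags & dpaa2_timestamp_rx_dynflag) {
 *		rte_mbuf_timestamp_t ts = *RTE_MBUF_DYNFIELD(mbuf,
 *			dpaa2_timestamp_dynfield_offset,
 *			rte_mbuf_timestamp_t *);
 *		// ... use ts
 *	}
 *
 * Applications would normally obtain their own offset/flag pair from
 * rte_mbuf_dyn_rx_timestamp_register() rather than using the
 * PMD-internal variables named here.
 */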
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0);		\
	DPAA2_RESET_FD_CTRL(_fd);		\
	DPAA2_RESET_FD_FLC(_fd);		\
} while (0)

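/* Illustrative sketch (not part of the driver): converting a
 * single-segment mbuf into a contiguous frame descriptor on the Tx
 * path, assuming the buffer pool id has already been resolved via the
 * driver-internal mempool_to_bpid() helper:
 *
 *	struct qbman_fd fd;
 *	uint16_t bpid = mempool_to_bpid(mbuf->pool);
 *
 *	DPAA2_MBUF_TO_CONTIG_FD(mbuf, &fd, bpid);
 *
 * The FD then carries the IOVA, length, offset and bpid of the mbuf's
 * data buffer; FRC/CTRL/FLC are cleared so that stale context from a
 * previous transmission is not reused.
 */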
static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;

	if (dpaa2_enable_ts[m->port]) {
		*dpaa2_timestamp_dynfield(m) = annotation->word2;
		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(m));
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
				L2_MPLS_N_PRESENT))
		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		  L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

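/* Illustrative sketch (not part of the driver): the value returned by
 * the parsers above is a standard rte_mbuf packet-type bitmask, so a
 * consumer can classify packets with the usual RTE_PTYPE_*_MASK helpers:
 *
 *	uint32_t ptype = mbuf->packet_type;
 *
 *	if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4 &&
 *	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
 *		// IPv4/UDP frame without IP options
 *	}
 */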
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;

	if (dpaa2_enable_ts[mbuf->port]) {
		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(mbuf));
	}

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}

static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	void *hw_annot_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
	else
		first_seg->packet_type =
			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}

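/* Illustrative note (an assumption drawn from the code above, not an
 * authoritative layout description): a scatter-gather FD points at one
 * buffer holding the SG table; each SG entry (qbman_sge) in turn points
 * at a data buffer carrying one mbuf segment, with the last entry
 * flagged so DPAA2_SG_IS_FINAL() terminates the walk:
 *
 *	FD (addr + offset) --> [ sge0 | sge1 | ... | sgeN (final) ]
 *	                          |      |             |
 *	                          v      v             v
 *	                        seg0   seg1   ...    segN
 *
 * eth_sg_fd_to_mbuf() walks that table, rebuilds the mbuf chain in
 * place via DPAA2_INLINE_MBUF_FROM_BUF(), and frees the buffer that
 * held the SG table itself.
 */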
static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed during the last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* Parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, parse results are placed after
	 * the private SW annotation area.
	 */

	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}

static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd,
		  struct rte_mempool *mp, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i, offset = 0;

#ifdef RTE_LIBRTE_IEEE1588
	/* annotation area for timestamp in first buffer */
	offset = 0x64;
#endif
	if (RTE_MBUF_DIRECT(mbuf) &&
		(mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
		+ offset))) {
		temp = mbuf;
		if (rte_mbuf_refcnt_read(temp) > 1) {
			/* If refcnt > 1, an invalid bpid is set to ensure
			 * the buffer is not freed by HW
			 */
			fd->simple.bpid_offset = 0;
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(temp, -1);
		} else {
			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
		}
		DPAA2_SET_FD_OFFSET(fd, offset);
	} else {
		temp = rte_pktmbuf_alloc(mp);
		if (temp == NULL) {
			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
			return -ENOMEM;
		}
		DPAA2_SET_ONLY_FD_BPID(fd, bpid);
		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	}
	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	DPAA2_RESET_FD_FLC(fd);
	/* Set the scatter-gather table and scatter-gather entries */
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Reset the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			/* if we are using an inline SGT in the same buffer,
			 * set the FLE FMT as Frame Data Section
			 */
			if (temp == cur_seg) {
				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
				DPAA2_SET_FLE_IVP(sge);
			} else {
				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
					/* If refcnt > 1, an invalid bpid is
					 * set to ensure the buffer is not
					 * freed by HW
					 */
					DPAA2_SET_FLE_IVP(sge);
					rte_mbuf_refcnt_update(cur_seg, -1);
				} else {
					DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
				}
			}
			cur_seg = cur_seg->next;
		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
			DPAA2_SET_FLE_IVP(sge);
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, an invalid bpid is set to
				 * ensure the owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;

static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
		DPAA2_SET_FD_IVP(fd);
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}

static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static void
dump_err_pkts(struct dpaa2_queue *dpaa2_q)
{
	/* Function receives error frames for a given device and VQ */
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	uint32_t lcore_id = rte_lcore_id();
	void *v_addr, *hw_annot_addr;
	struct dpaa2_fas *fas;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Check if the previously issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;

	num_pulled = 0;
	pending = 1;
	do {
		/* Loop until the dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether the last pull command has expired,
		 * setting the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status &
				QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);
		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
		fas = hw_annot_addr;

		DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
			" fd_off: %d, fd_err: %x, fas_status: %x",
			rte_lcore_id(), eth_data->port_id,
			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
			fas->status);
		rte_hexdump(stderr, "Error packet", v_addr,
			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));

		dq_storage++;
		num_rx++;
		num_pulled++;
	} while (pending);

	dpaa2_q->err_pkts += num_rx;
}

/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls per queue; if that is not the case, it is better to use the
 * non-prefetch version of the rx call.
 * It will return the packets as requested in the previous call without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;

	if (unlikely(dpaa2_enable_err_queue))
		dump_err_pkts(priv->rx_err_vq);

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						  " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare the next pull descriptor. This gives room for the
	 * prefetching done on DQRR entries.
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previously issued command is completed.
	 * The SWP also appears to be shared between the Ethernet
	 * driver and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired,
		 * setting the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		if (dpaa2_svr_family != SVR_LX2160A) {
			const struct qbman_fd *next_fd =
				qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
		}
#endif

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
					  "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

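/* Illustrative sketch (not part of the driver): because the prefetch Rx
 * path sizes its next volatile dequeue from the previous call, a polling
 * loop should pass a constant burst size for a given queue:
 *
 *	#define BURST 32
 *	struct rte_mbuf *pkts[BURST];
 *	uint16_t i, n;
 *
 *	for (;;) {
 *		n = rte_eth_rx_burst(port_id, queue_id, pkts, BURST);
 *		for (i = 0; i < n; i++)
 *			process(pkts[i]);	// hypothetical consumer
 *	}
 *
 * Varying the burst size between calls on the same queue can return
 * more packets than the current call requested; applications that need
 * a varying burst size should use the non-prefetch dpaa2_dev_rx() path.
 */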
void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	qbman_swp_dqrr_consume(swp, dq);
}

void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	dqrr_index = qbman_get_dqrr_idx(dq);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}

uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;

	if (unlikely(dpaa2_enable_err_queue))
		dump_err_pkts(priv->rx_err_vq);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
				dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command is not issued. QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previously issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * a new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether the last pull command has expired,
			 * setting the condition for loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			if (dpaa2_svr_family != SVR_LX2160A) {
				const struct qbman_fd *next_fd =
					qbman_result_DQ_fd(dq_storage + 1);

				/* Prefetch Annotation address for the parse
				 * results.
				 */
				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
					DPAA2_GET_FD_ADDR(next_fd) +
					DPAA2_FD_PTA_SIZE + 16)));
			}
#endif

			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							eth_data->port_id);
			else
				bufs[num_rx] = eth_fd_to_mbuf(fd,
							eth_data->port_id);

			if (eth_data->dev_conf.rxmode.offloads &
					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
				rte_vlan_strip(bufs[num_rx]);
			}

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

10069ceacab7SPriyanka Jain uint16_t dpaa2_dev_tx_conf(void *queue)
10079ceacab7SPriyanka Jain {
10089ceacab7SPriyanka Jain 	/* Function receive frames for a given device and VQ */
10099ceacab7SPriyanka Jain 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
10109ceacab7SPriyanka Jain 	struct qbman_result *dq_storage;
10119ceacab7SPriyanka Jain 	uint32_t fqid = dpaa2_q->fqid;
10129ceacab7SPriyanka Jain 	int ret, num_tx_conf = 0, num_pulled;
10139ceacab7SPriyanka Jain 	uint8_t pending, status;
10149ceacab7SPriyanka Jain 	struct qbman_swp *swp;
10159ceacab7SPriyanka Jain 	const struct qbman_fd *fd, *next_fd;
10169ceacab7SPriyanka Jain 	struct qbman_pull_desc pulldesc;
10179ceacab7SPriyanka Jain 	struct qbman_release_desc releasedesc;
10189ceacab7SPriyanka Jain 	uint32_t bpid;
10199ceacab7SPriyanka Jain 	uint64_t buf;
1020e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588)
1021e806bf87SPriyanka Jain 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1022e806bf87SPriyanka Jain 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1023e806bf87SPriyanka Jain 	struct dpaa2_annot_hdr *annotation;
1024e806bf87SPriyanka Jain #endif
10259ceacab7SPriyanka Jain 
10269ceacab7SPriyanka Jain 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
10279ceacab7SPriyanka Jain 		ret = dpaa2_affine_qbman_swp();
10289ceacab7SPriyanka Jain 		if (ret) {
1029d527f5d9SNipun Gupta 			DPAA2_PMD_ERR(
1030d527f5d9SNipun Gupta 				"Failed to allocate IO portal, tid: %d\n",
1031d527f5d9SNipun Gupta 				rte_gettid());
10329ceacab7SPriyanka Jain 			return 0;
10339ceacab7SPriyanka Jain 		}
10349ceacab7SPriyanka Jain 	}
10359ceacab7SPriyanka Jain 	swp = DPAA2_PER_LCORE_PORTAL;
10369ceacab7SPriyanka Jain 
10379ceacab7SPriyanka Jain 	do {
10389ceacab7SPriyanka Jain 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
10399ceacab7SPriyanka Jain 		qbman_pull_desc_clear(&pulldesc);
10409ceacab7SPriyanka Jain 		qbman_pull_desc_set_fq(&pulldesc, fqid);
10419ceacab7SPriyanka Jain 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
10429ceacab7SPriyanka Jain 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
10439ceacab7SPriyanka Jain 
10449ceacab7SPriyanka Jain 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
10459ceacab7SPriyanka Jain 
10469ceacab7SPriyanka Jain 		while (1) {
10479ceacab7SPriyanka Jain 			if (qbman_swp_pull(swp, &pulldesc)) {
10489ceacab7SPriyanka Jain 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
10499ceacab7SPriyanka Jain 						   " QBMAN is busy\n");
10509ceacab7SPriyanka Jain 				/* Portal was busy, try again */
10519ceacab7SPriyanka Jain 				continue;
10529ceacab7SPriyanka Jain 			}
10539ceacab7SPriyanka Jain 			break;
10549ceacab7SPriyanka Jain 		}
10559ceacab7SPriyanka Jain 
10569ceacab7SPriyanka Jain 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
10579ceacab7SPriyanka Jain 		/* Check if the previously issued command is completed. */
10589ceacab7SPriyanka Jain 		while (!qbman_check_command_complete(dq_storage))
10599ceacab7SPriyanka Jain 			;
10609ceacab7SPriyanka Jain 
10619ceacab7SPriyanka Jain 		num_pulled = 0;
10629ceacab7SPriyanka Jain 		pending = 1;
10639ceacab7SPriyanka Jain 		do {
10649ceacab7SPriyanka Jain 			/* Loop until the dq_storage is updated with
10659ceacab7SPriyanka Jain 			 * a new token by QBMAN
10669ceacab7SPriyanka Jain 			 */
10679ceacab7SPriyanka Jain 			while (!qbman_check_new_result(dq_storage))
10689ceacab7SPriyanka Jain 				;
10699ceacab7SPriyanka Jain 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
10709ceacab7SPriyanka Jain 			/* Check whether the last pull command has expired
10719ceacab7SPriyanka Jain 			 * and set the condition for loop termination
10729ceacab7SPriyanka Jain 			 */
10739ceacab7SPriyanka Jain 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
10749ceacab7SPriyanka Jain 				pending = 0;
10759ceacab7SPriyanka Jain 				/* Check for valid frame. */
10769ceacab7SPriyanka Jain 				status = qbman_result_DQ_flags(dq_storage);
10779ceacab7SPriyanka Jain 				if (unlikely((status &
10789ceacab7SPriyanka Jain 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
10799ceacab7SPriyanka Jain 					continue;
10809ceacab7SPriyanka Jain 			}
10819ceacab7SPriyanka Jain 			fd = qbman_result_DQ_fd(dq_storage);
10829ceacab7SPriyanka Jain 
10839ceacab7SPriyanka Jain 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
10849ceacab7SPriyanka Jain 			/* Prefetch Annotation address for the parse results */
10859ceacab7SPriyanka Jain 			rte_prefetch0((void *)(size_t)
10869ceacab7SPriyanka Jain 				(DPAA2_GET_FD_ADDR(next_fd) +
10879ceacab7SPriyanka Jain 				 DPAA2_FD_PTA_SIZE + 16));
10889ceacab7SPriyanka Jain 
10899ceacab7SPriyanka Jain 			bpid = DPAA2_GET_FD_BPID(fd);
10909ceacab7SPriyanka Jain 
10919ceacab7SPriyanka Jain 			/* Create a release descriptor required for releasing
10929ceacab7SPriyanka Jain 			 * buffers into QBMAN
10939ceacab7SPriyanka Jain 			 */
10949ceacab7SPriyanka Jain 			qbman_release_desc_clear(&releasedesc);
10959ceacab7SPriyanka Jain 			qbman_release_desc_set_bpid(&releasedesc, bpid);
10969ceacab7SPriyanka Jain 
10979ceacab7SPriyanka Jain 			buf = DPAA2_GET_FD_ADDR(fd);
10989ceacab7SPriyanka Jain 			/* feed them to bman */
10999ceacab7SPriyanka Jain 			do {
11009ceacab7SPriyanka Jain 				ret = qbman_swp_release(swp, &releasedesc,
11019ceacab7SPriyanka Jain 							&buf, 1);
11029ceacab7SPriyanka Jain 			} while (ret == -EBUSY);
11039ceacab7SPriyanka Jain 
11049ceacab7SPriyanka Jain 			dq_storage++;
11059ceacab7SPriyanka Jain 			num_tx_conf++;
11069ceacab7SPriyanka Jain 			num_pulled++;
1107e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588)
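			/* The hardware-captured TX timestamp is read from
			 * word2 of the frame annotation area that precedes
			 * the frame data (at DPAA2_FD_PTA_SIZE offset).
			 */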
1108e806bf87SPriyanka Jain 			annotation = (struct dpaa2_annot_hdr *)((size_t)
1109e806bf87SPriyanka Jain 				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1110e806bf87SPriyanka Jain 				DPAA2_FD_PTA_SIZE);
1111e806bf87SPriyanka Jain 			priv->tx_timestamp = annotation->word2;
1112e806bf87SPriyanka Jain #endif
11139ceacab7SPriyanka Jain 		} while (pending);
11149ceacab7SPriyanka Jain 
11159ceacab7SPriyanka Jain 	/* Last VDQ provided all packets and more packets are requested */
11169ceacab7SPriyanka Jain 	} while (num_pulled == dpaa2_dqrr_size);
11179ceacab7SPriyanka Jain 
11189ceacab7SPriyanka Jain 	dpaa2_q->rx_pkts += num_tx_conf;
11199ceacab7SPriyanka Jain 
11209ceacab7SPriyanka Jain 	return num_tx_conf;
11219ceacab7SPriyanka Jain }
11229ceacab7SPriyanka Jain 
1123e806bf87SPriyanka Jain /* Configure the egress frame annotation for timestamp update */
1124e806bf87SPriyanka Jain static void enable_tx_tstamp(struct qbman_fd *fd)
1125e806bf87SPriyanka Jain {
1126e806bf87SPriyanka Jain 	struct dpaa2_faead *fd_faead;
1127e806bf87SPriyanka Jain 
1128e806bf87SPriyanka Jain 	/* Set frame annotation status field as valid */
1129e806bf87SPriyanka Jain 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1130e806bf87SPriyanka Jain 
1131e806bf87SPriyanka Jain 	/* Set frame annotation egress action descriptor as valid */
1132e806bf87SPriyanka Jain 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1133e806bf87SPriyanka Jain 
1134e806bf87SPriyanka Jain 	/* Set Annotation Length as 128B */
1135e806bf87SPriyanka Jain 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1136e806bf87SPriyanka Jain 
1137e806bf87SPriyanka Jain 	/* enable update of confirmation frame annotation */
1138e806bf87SPriyanka Jain 	fd_faead = (struct dpaa2_faead *)((size_t)
1139e806bf87SPriyanka Jain 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1140e806bf87SPriyanka Jain 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1141e806bf87SPriyanka Jain 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1142e806bf87SPriyanka Jain 				DPAA2_ANNOT_FAEAD_UPD;
1143e806bf87SPriyanka Jain }
1144e806bf87SPriyanka Jain 
1145cd9935ceSHemant Agrawal /*
1146cd9935ceSHemant Agrawal  * Callback to handle sending packets through WRIOP based interface
1147cd9935ceSHemant Agrawal  */
1148cd9935ceSHemant Agrawal uint16_t
1149cd9935ceSHemant Agrawal dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1150cd9935ceSHemant Agrawal {
1151cd9935ceSHemant Agrawal 	/* Function to transmit the frames to a given device and VQ */
1152a0840963SHemant Agrawal 	uint32_t loop, retry_count;
1153cd9935ceSHemant Agrawal 	int32_t ret;
1154cd9935ceSHemant Agrawal 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1155774e9ea9SHemant Agrawal 	struct rte_mbuf *mi;
1156cd9935ceSHemant Agrawal 	uint32_t frames_to_send;
1157cd9935ceSHemant Agrawal 	struct rte_mempool *mp;
1158cd9935ceSHemant Agrawal 	struct qbman_eq_desc eqdesc;
1159cd9935ceSHemant Agrawal 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1160cd9935ceSHemant Agrawal 	struct qbman_swp *swp;
1161cd9935ceSHemant Agrawal 	uint16_t num_tx = 0;
1162cd9935ceSHemant Agrawal 	uint16_t bpid;
116385ee5ddaSShreyansh Jain 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
116485ee5ddaSShreyansh Jain 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
11652d378863SNipun Gupta 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
11666bfbafe1SNipun Gupta 	struct rte_mbuf **orig_bufs = bufs;
1167cd9935ceSHemant Agrawal 
1168cd9935ceSHemant Agrawal 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1169cd9935ceSHemant Agrawal 		ret = dpaa2_affine_qbman_swp();
1170cd9935ceSHemant Agrawal 		if (ret) {
1171d527f5d9SNipun Gupta 			DPAA2_PMD_ERR(
1172d527f5d9SNipun Gupta 				"Failed to allocate IO portal, tid: %d\n",
1173d527f5d9SNipun Gupta 				rte_gettid());
1174cd9935ceSHemant Agrawal 			return 0;
1175cd9935ceSHemant Agrawal 		}
1176cd9935ceSHemant Agrawal 	}
1177cd9935ceSHemant Agrawal 	swp = DPAA2_PER_LCORE_PORTAL;
1178cd9935ceSHemant Agrawal 
117985ee5ddaSShreyansh Jain 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
118085ee5ddaSShreyansh Jain 			eth_data, dpaa2_q->fqid);
1181cd9935ceSHemant Agrawal 
1182e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588
1183e806bf87SPriyanka Jain 	/* The IEEE1588 driver needs a pointer to the TX confirmation queue
1184e806bf87SPriyanka Jain 	 * corresponding to the last packet transmitted, for reading
1185e806bf87SPriyanka Jain 	 * the timestamp
1186e806bf87SPriyanka Jain 	 */
1187e806bf87SPriyanka Jain 	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1188e806bf87SPriyanka Jain 	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1189e806bf87SPriyanka Jain #endif
1190e806bf87SPriyanka Jain 
1191cd9935ceSHemant Agrawal 	/*Prepare enqueue descriptor*/
1192cd9935ceSHemant Agrawal 	qbman_eq_desc_clear(&eqdesc);
1193cd9935ceSHemant Agrawal 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1194e26bf82eSSachin Saxena 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1195e26bf82eSSachin Saxena 
1196cd9935ceSHemant Agrawal 	/*Clear the unused FD fields before sending*/
1197cd9935ceSHemant Agrawal 	while (nb_pkts) {
11987ae777d0SHemant Agrawal 		/*Check if the queue is congested*/
1199a0840963SHemant Agrawal 		retry_count = 0;
120069293c77SHemant Agrawal 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1201a0840963SHemant Agrawal 			retry_count++;
1202a0840963SHemant Agrawal 			/* Retry for some time before giving up */
1203a0840963SHemant Agrawal 			if (retry_count > CONG_RETRY_COUNT)
12047ae777d0SHemant Agrawal 				goto skip_tx;
1205a0840963SHemant Agrawal 		}
12067ae777d0SHemant Agrawal 
1207bd23b1a8SNipun Gupta 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1208bd23b1a8SNipun Gupta 			dpaa2_eqcr_size : nb_pkts;
1209cd9935ceSHemant Agrawal 
1210cd9935ceSHemant Agrawal 		for (loop = 0; loop < frames_to_send; loop++) {
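			/* A non-zero seqn marks a packet received on an
			 * atomic queue; piggy-back a DCA (discrete consumption
			 * acknowledgement) on the enqueue so that the held
			 * DQRR entry is released as well.
			 */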
1211ea278063SDavid Marchand 			if (*dpaa2_seqn(*bufs)) {
1212ea278063SDavid Marchand 				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
12132d378863SNipun Gupta 
12142d378863SNipun Gupta 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
12152d378863SNipun Gupta 						dqrr_index;
12162d378863SNipun Gupta 				DPAA2_PER_LCORE_DQRR_SIZE--;
12172d378863SNipun Gupta 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1218ea278063SDavid Marchand 				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
12192d378863SNipun Gupta 			}
12202d378863SNipun Gupta 
122148e7f156SNipun Gupta 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1222cd9935ceSHemant Agrawal 				mp = (*bufs)->pool;
122348e7f156SNipun Gupta 				/* Check the basic scenario and set
122448e7f156SNipun Gupta 				 * the FD appropriately here itself.
122548e7f156SNipun Gupta 				 */
122648e7f156SNipun Gupta 				if (likely(mp && mp->ops_index ==
122748e7f156SNipun Gupta 				    priv->bp_list->dpaa2_ops_index &&
122848e7f156SNipun Gupta 				    (*bufs)->nb_segs == 1 &&
122948e7f156SNipun Gupta 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
12300ebce612SSunil Kumar Kori 					if (unlikely(((*bufs)->ol_flags
1231*daa02b5cSOlivier Matz 						& RTE_MBUF_F_TX_VLAN) ||
123285ee5ddaSShreyansh Jain 						(eth_data->dev_conf.txmode.offloads
1233295968d1SFerruh Yigit 						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
123448e7f156SNipun Gupta 						ret = rte_vlan_insert(bufs);
123548e7f156SNipun Gupta 						if (ret)
123648e7f156SNipun Gupta 							goto send_n_return;
123748e7f156SNipun Gupta 					}
123848e7f156SNipun Gupta 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
123948e7f156SNipun Gupta 					&fd_arr[loop], mempool_to_bpid(mp));
124048e7f156SNipun Gupta 					bufs++;
1241e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588
1242e806bf87SPriyanka Jain 					enable_tx_tstamp(&fd_arr[loop]);
1243e806bf87SPriyanka Jain #endif
124448e7f156SNipun Gupta 					continue;
124548e7f156SNipun Gupta 				}
1246774e9ea9SHemant Agrawal 			} else {
1247774e9ea9SHemant Agrawal 				mi = rte_mbuf_from_indirect(*bufs);
1248774e9ea9SHemant Agrawal 				mp = mi->pool;
1249774e9ea9SHemant Agrawal 			}
12506bfbafe1SNipun Gupta 
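			/* External-buffer mbufs are not owned by a DPAA2
			 * buffer pool; they are freed in software after the
			 * enqueue completes (see the loop over orig_bufs
			 * at the end of this function).
			 */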
12516bfbafe1SNipun Gupta 			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
12526bfbafe1SNipun Gupta 				if (unlikely((*bufs)->nb_segs > 1)) {
12536bfbafe1SNipun Gupta 					if (eth_mbuf_to_sg_fd(*bufs,
12546bfbafe1SNipun Gupta 							      &fd_arr[loop],
12556bfbafe1SNipun Gupta 							      mp, 0))
12566bfbafe1SNipun Gupta 						goto send_n_return;
12576bfbafe1SNipun Gupta 				} else {
12586bfbafe1SNipun Gupta 					eth_mbuf_to_fd(*bufs,
12596bfbafe1SNipun Gupta 						       &fd_arr[loop], 0);
12606bfbafe1SNipun Gupta 				}
12616bfbafe1SNipun Gupta 				bufs++;
12626bfbafe1SNipun Gupta #ifdef RTE_LIBRTE_IEEE1588
12636bfbafe1SNipun Gupta 				enable_tx_tstamp(&fd_arr[loop]);
12646bfbafe1SNipun Gupta #endif
12656bfbafe1SNipun Gupta 				continue;
12666bfbafe1SNipun Gupta 			}
12676bfbafe1SNipun Gupta 
12689e5f3e6dSHemant Agrawal 			/* Not a hw_pkt pool allocated frame */
1269790ec226SHemant Agrawal 			if (unlikely(!mp || !priv->bp_list)) {
1270a10a988aSShreyansh Jain 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1271790ec226SHemant Agrawal 				goto send_n_return;
1272774e9ea9SHemant Agrawal 			}
1273790ec226SHemant Agrawal 
1274*daa02b5cSOlivier Matz 			if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
127585ee5ddaSShreyansh Jain 				(eth_data->dev_conf.txmode.offloads
1276295968d1SFerruh Yigit 				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
12770ebce612SSunil Kumar Kori 				int ret = rte_vlan_insert(bufs);
12780ebce612SSunil Kumar Kori 				if (ret)
12790ebce612SSunil Kumar Kori 					goto send_n_return;
12800ebce612SSunil Kumar Kori 			}
12819e5f3e6dSHemant Agrawal 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1282a10a988aSShreyansh Jain 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
12839e5f3e6dSHemant Agrawal 				/* alloc should be from the default buffer pool
12849e5f3e6dSHemant Agrawal 				 * attached to this interface
12859e5f3e6dSHemant Agrawal 				 */
12869e5f3e6dSHemant Agrawal 				bpid = priv->bp_list->buf_pool.bpid;
1287790ec226SHemant Agrawal 
1288774e9ea9SHemant Agrawal 				if (unlikely((*bufs)->nb_segs > 1)) {
1289a10a988aSShreyansh Jain 					DPAA2_PMD_ERR("S/G support not added"
1290774e9ea9SHemant Agrawal 						" for non hw offload buffer");
1291790ec226SHemant Agrawal 					goto send_n_return;
1292774e9ea9SHemant Agrawal 				}
12939e5f3e6dSHemant Agrawal 				if (eth_copy_mbuf_to_fd(*bufs,
12949e5f3e6dSHemant Agrawal 							&fd_arr[loop], bpid)) {
1295790ec226SHemant Agrawal 					goto send_n_return;
12969e5f3e6dSHemant Agrawal 				}
1297790ec226SHemant Agrawal 				/* free the original packet */
1298790ec226SHemant Agrawal 				rte_pktmbuf_free(*bufs);
12999e5f3e6dSHemant Agrawal 			} else {
1300cd9935ceSHemant Agrawal 				bpid = mempool_to_bpid(mp);
1301774e9ea9SHemant Agrawal 				if (unlikely((*bufs)->nb_segs > 1)) {
1302774e9ea9SHemant Agrawal 					if (eth_mbuf_to_sg_fd(*bufs,
1303cc8569f0SHemant Agrawal 							&fd_arr[loop],
1304cc8569f0SHemant Agrawal 							mp, bpid))
1305790ec226SHemant Agrawal 						goto send_n_return;
1306774e9ea9SHemant Agrawal 				} else {
1307774e9ea9SHemant Agrawal 					eth_mbuf_to_fd(*bufs,
1308774e9ea9SHemant Agrawal 						       &fd_arr[loop], bpid);
1309774e9ea9SHemant Agrawal 				}
13109e5f3e6dSHemant Agrawal 			}
1311e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588
1312e806bf87SPriyanka Jain 			enable_tx_tstamp(&fd_arr[loop]);
1313e806bf87SPriyanka Jain #endif
1314cd9935ceSHemant Agrawal 			bufs++;
1315cd9935ceSHemant Agrawal 		}
1316ce4fd609SNipun Gupta 
1317cd9935ceSHemant Agrawal 		loop = 0;
1318ce4fd609SNipun Gupta 		retry_count = 0;
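		/* qbman_swp_enqueue_multiple() returns the number of frames
		 * actually enqueued; keep retrying the remainder, giving up
		 * after DPAA2_MAX_TX_RETRY_COUNT consecutive failures.
		 */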
1319cd9935ceSHemant Agrawal 		while (loop < frames_to_send) {
1320ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
13212d378863SNipun Gupta 					&fd_arr[loop], &flags[loop],
1322496324d2SNipun Gupta 					frames_to_send - loop);
1323ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1324ce4fd609SNipun Gupta 				retry_count++;
1325ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1326ce4fd609SNipun Gupta 					num_tx += loop;
1327ce4fd609SNipun Gupta 					nb_pkts -= loop;
1328ce4fd609SNipun Gupta 					goto send_n_return;
1329ce4fd609SNipun Gupta 				}
1330ce4fd609SNipun Gupta 			} else {
1331ce4fd609SNipun Gupta 				loop += ret;
1332ce4fd609SNipun Gupta 				retry_count = 0;
1333ce4fd609SNipun Gupta 			}
1334cd9935ceSHemant Agrawal 		}
1335cd9935ceSHemant Agrawal 
1336ce4fd609SNipun Gupta 		num_tx += loop;
1337ce4fd609SNipun Gupta 		nb_pkts -= loop;
1338cd9935ceSHemant Agrawal 	}
133948e7f156SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
13406bfbafe1SNipun Gupta 
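	/* Software-free pass for the external-buffer mbufs that were just
	 * transmitted; these are not released by the hardware buffer manager.
	 */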
13416bfbafe1SNipun Gupta 	loop = 0;
13426bfbafe1SNipun Gupta 	while (loop < num_tx) {
13436bfbafe1SNipun Gupta 		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
13446bfbafe1SNipun Gupta 			rte_pktmbuf_free(*orig_bufs);
13456bfbafe1SNipun Gupta 		orig_bufs++;
13466bfbafe1SNipun Gupta 		loop++;
13476bfbafe1SNipun Gupta 	}
13486bfbafe1SNipun Gupta 
1349790ec226SHemant Agrawal 	return num_tx;
1350790ec226SHemant Agrawal 
1351790ec226SHemant Agrawal send_n_return:
1352790ec226SHemant Agrawal 	/* send any already prepared fd */
1353790ec226SHemant Agrawal 	if (loop) {
1354790ec226SHemant Agrawal 		unsigned int i = 0;
1355790ec226SHemant Agrawal 
1356ce4fd609SNipun Gupta 		retry_count = 0;
1357790ec226SHemant Agrawal 		while (i < loop) {
1358ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
13592d378863SNipun Gupta 							 &fd_arr[i],
1360ce4fd609SNipun Gupta 							 &flags[i],
1361496324d2SNipun Gupta 							 loop - i);
1362ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1363ce4fd609SNipun Gupta 				retry_count++;
1364ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1365ce4fd609SNipun Gupta 					break;
1366ce4fd609SNipun Gupta 			} else {
1367ce4fd609SNipun Gupta 				i += ret;
1368ce4fd609SNipun Gupta 				retry_count = 0;
1369790ec226SHemant Agrawal 			}
1370ce4fd609SNipun Gupta 		}
1371ce4fd609SNipun Gupta 		num_tx += i;
1372790ec226SHemant Agrawal 	}
13739e5f3e6dSHemant Agrawal skip_tx:
137448e7f156SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
13756bfbafe1SNipun Gupta 
13766bfbafe1SNipun Gupta 	loop = 0;
13776bfbafe1SNipun Gupta 	while (loop < num_tx) {
13786bfbafe1SNipun Gupta 		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
13796bfbafe1SNipun Gupta 			rte_pktmbuf_free(*orig_bufs);
13806bfbafe1SNipun Gupta 		orig_bufs++;
13816bfbafe1SNipun Gupta 		loop++;
13826bfbafe1SNipun Gupta 	}
13836bfbafe1SNipun Gupta 
1384cd9935ceSHemant Agrawal 	return num_tx;
1385cd9935ceSHemant Agrawal }
1386a1f3a12cSHemant Agrawal 
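/* Convert the FD held in an enqueue-response entry back to an mbuf and free
 * it (typically invoked when a response-mode enqueue did not consume the
 * frame).
 */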
138716c4a3c4SNipun Gupta void
138816c4a3c4SNipun Gupta dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
138916c4a3c4SNipun Gupta {
139016c4a3c4SNipun Gupta 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
139116c4a3c4SNipun Gupta 	struct qbman_fd *fd;
139216c4a3c4SNipun Gupta 	struct rte_mbuf *m;
139316c4a3c4SNipun Gupta 
139416c4a3c4SNipun Gupta 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1395005d943eSNipun Gupta 
1396005d943eSNipun Gupta 	/* Setting the port id does not matter as we are about to free the mbuf */
1397005d943eSNipun Gupta 	m = eth_fd_to_mbuf(fd, 0);
139816c4a3c4SNipun Gupta 	rte_pktmbuf_free(m);
139916c4a3c4SNipun Gupta }
140016c4a3c4SNipun Gupta 
140116c4a3c4SNipun Gupta static void
140216c4a3c4SNipun Gupta dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
140316c4a3c4SNipun Gupta 			     struct rte_mbuf *m,
140416c4a3c4SNipun Gupta 			     struct qbman_eq_desc *eqdesc)
140516c4a3c4SNipun Gupta {
140616c4a3c4SNipun Gupta 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
140716c4a3c4SNipun Gupta 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
140816c4a3c4SNipun Gupta 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
140916c4a3c4SNipun Gupta 	struct eqresp_metadata *eqresp_meta;
141016c4a3c4SNipun Gupta 	uint16_t orpid, seqnum;
141116c4a3c4SNipun Gupta 	uint8_t dq_idx;
141216c4a3c4SNipun Gupta 
1413e26bf82eSSachin Saxena 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
141416c4a3c4SNipun Gupta 
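	/* The mbuf's seqn field encodes either an ORP (order restoration
	 * point) id plus sequence number, or a DQRR ring index + 1 for
	 * packets received on an atomic queue.
	 */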
1415ea278063SDavid Marchand 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1416ea278063SDavid Marchand 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
141716c4a3c4SNipun Gupta 			DPAA2_EQCR_OPRID_SHIFT;
1418ea278063SDavid Marchand 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
141916c4a3c4SNipun Gupta 			DPAA2_EQCR_SEQNUM_SHIFT;
142016c4a3c4SNipun Gupta 
142116c4a3c4SNipun Gupta 		if (!priv->en_loose_ordered) {
142216c4a3c4SNipun Gupta 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
142316c4a3c4SNipun Gupta 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
142416c4a3c4SNipun Gupta 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
142516c4a3c4SNipun Gupta 				dpio_dev->eqresp_pi]), 1);
142616c4a3c4SNipun Gupta 			qbman_eq_desc_set_token(eqdesc, 1);
142716c4a3c4SNipun Gupta 
142816c4a3c4SNipun Gupta 			eqresp_meta = &dpio_dev->eqresp_meta[
142916c4a3c4SNipun Gupta 				dpio_dev->eqresp_pi];
143016c4a3c4SNipun Gupta 			eqresp_meta->dpaa2_q = dpaa2_q;
143116c4a3c4SNipun Gupta 			eqresp_meta->mp = m->pool;
143216c4a3c4SNipun Gupta 
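			/* Advance the enqueue-response producer index,
			 * wrapping around at MAX_EQ_RESP_ENTRIES.
			 */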
143316c4a3c4SNipun Gupta 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
143416c4a3c4SNipun Gupta 				dpio_dev->eqresp_pi++ :
143516c4a3c4SNipun Gupta 				(dpio_dev->eqresp_pi = 0);
143616c4a3c4SNipun Gupta 		} else {
143716c4a3c4SNipun Gupta 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
143816c4a3c4SNipun Gupta 		}
143916c4a3c4SNipun Gupta 	} else {
1440ea278063SDavid Marchand 		dq_idx = *dpaa2_seqn(m) - 1;
144116c4a3c4SNipun Gupta 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
144216c4a3c4SNipun Gupta 		DPAA2_PER_LCORE_DQRR_SIZE--;
144316c4a3c4SNipun Gupta 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
144416c4a3c4SNipun Gupta 	}
1445ea278063SDavid Marchand 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
144616c4a3c4SNipun Gupta }
144716c4a3c4SNipun Gupta 
144816c4a3c4SNipun Gupta /* Callback to handle sending ordered packets through WRIOP based interface */
144916c4a3c4SNipun Gupta uint16_t
145016c4a3c4SNipun Gupta dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
145116c4a3c4SNipun Gupta {
145216c4a3c4SNipun Gupta 	/* Function to transmit the frames to a given device and VQ */
145316c4a3c4SNipun Gupta 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
145416c4a3c4SNipun Gupta 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
145516c4a3c4SNipun Gupta 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
145616c4a3c4SNipun Gupta 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
145716c4a3c4SNipun Gupta 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
145816c4a3c4SNipun Gupta 	struct rte_mbuf *mi;
145916c4a3c4SNipun Gupta 	struct rte_mempool *mp;
146016c4a3c4SNipun Gupta 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
146116c4a3c4SNipun Gupta 	struct qbman_swp *swp;
146216c4a3c4SNipun Gupta 	uint32_t frames_to_send, num_free_eq_desc;
146316c4a3c4SNipun Gupta 	uint32_t loop, retry_count;
146416c4a3c4SNipun Gupta 	int32_t ret;
146516c4a3c4SNipun Gupta 	uint16_t num_tx = 0;
146616c4a3c4SNipun Gupta 	uint16_t bpid;
146716c4a3c4SNipun Gupta 
146816c4a3c4SNipun Gupta 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
146916c4a3c4SNipun Gupta 		ret = dpaa2_affine_qbman_swp();
147016c4a3c4SNipun Gupta 		if (ret) {
1471d527f5d9SNipun Gupta 			DPAA2_PMD_ERR(
1472d527f5d9SNipun Gupta 				"Failed to allocate IO portal, tid: %d\n",
1473d527f5d9SNipun Gupta 				rte_gettid());
147416c4a3c4SNipun Gupta 			return 0;
147516c4a3c4SNipun Gupta 		}
147616c4a3c4SNipun Gupta 	}
147716c4a3c4SNipun Gupta 	swp = DPAA2_PER_LCORE_PORTAL;
147816c4a3c4SNipun Gupta 
147916c4a3c4SNipun Gupta 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
148016c4a3c4SNipun Gupta 			   eth_data, dpaa2_q->fqid);
148116c4a3c4SNipun Gupta 
148216c4a3c4SNipun Gupta 	/* This would also handle normal and atomic queues as any type
148316c4a3c4SNipun Gupta 	 * of packet can be enqueued when ordered queues are being used.
148416c4a3c4SNipun Gupta 	 */
148516c4a3c4SNipun Gupta 	while (nb_pkts) {
148616c4a3c4SNipun Gupta 		/*Check if the queue is congested*/
148716c4a3c4SNipun Gupta 		retry_count = 0;
148816c4a3c4SNipun Gupta 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
148916c4a3c4SNipun Gupta 			retry_count++;
149016c4a3c4SNipun Gupta 			/* Retry for some time before giving up */
149116c4a3c4SNipun Gupta 			if (retry_count > CONG_RETRY_COUNT)
149216c4a3c4SNipun Gupta 				goto skip_tx;
149316c4a3c4SNipun Gupta 		}
149416c4a3c4SNipun Gupta 
149516c4a3c4SNipun Gupta 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
149616c4a3c4SNipun Gupta 			dpaa2_eqcr_size : nb_pkts;
149716c4a3c4SNipun Gupta 
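		/* In strict ordering mode every ORP enqueue consumes an
		 * enqueue-response descriptor, so cap the burst at the
		 * number of free descriptors.
		 */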
149816c4a3c4SNipun Gupta 		if (!priv->en_loose_ordered) {
1499ea278063SDavid Marchand 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
150016c4a3c4SNipun Gupta 				num_free_eq_desc = dpaa2_free_eq_descriptors();
150116c4a3c4SNipun Gupta 				if (num_free_eq_desc < frames_to_send)
150216c4a3c4SNipun Gupta 					frames_to_send = num_free_eq_desc;
150316c4a3c4SNipun Gupta 			}
150416c4a3c4SNipun Gupta 		}
150516c4a3c4SNipun Gupta 
150616c4a3c4SNipun Gupta 		for (loop = 0; loop < frames_to_send; loop++) {
150716c4a3c4SNipun Gupta 			/*Prepare enqueue descriptor*/
150816c4a3c4SNipun Gupta 			qbman_eq_desc_clear(&eqdesc[loop]);
150916c4a3c4SNipun Gupta 
1510ea278063SDavid Marchand 			if (*dpaa2_seqn(*bufs)) {
151116c4a3c4SNipun Gupta 				/* Use only queue 0 for Tx in case of atomic/
151216c4a3c4SNipun Gupta 				 * ordered packets as packets can get unordered
151316c4a3c4SNipun Gupta 				 * when being transmitted out from the interface
151416c4a3c4SNipun Gupta 				 */
151516c4a3c4SNipun Gupta 				dpaa2_set_enqueue_descriptor(order_sendq,
151616c4a3c4SNipun Gupta 							     (*bufs),
151716c4a3c4SNipun Gupta 							     &eqdesc[loop]);
151816c4a3c4SNipun Gupta 			} else {
151916c4a3c4SNipun Gupta 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
152016c4a3c4SNipun Gupta 							 DPAA2_EQ_RESP_ERR_FQ);
1521e26bf82eSSachin Saxena 				qbman_eq_desc_set_fq(&eqdesc[loop],
1522e26bf82eSSachin Saxena 						     dpaa2_q->fqid);
152316c4a3c4SNipun Gupta 			}
152416c4a3c4SNipun Gupta 
152516c4a3c4SNipun Gupta 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
152616c4a3c4SNipun Gupta 				mp = (*bufs)->pool;
152716c4a3c4SNipun Gupta 				/* Check the basic scenario and set
152816c4a3c4SNipun Gupta 				 * the FD appropriately here itself.
152916c4a3c4SNipun Gupta 				 */
153016c4a3c4SNipun Gupta 				if (likely(mp && mp->ops_index ==
153116c4a3c4SNipun Gupta 				    priv->bp_list->dpaa2_ops_index &&
153216c4a3c4SNipun Gupta 				    (*bufs)->nb_segs == 1 &&
153316c4a3c4SNipun Gupta 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
153416c4a3c4SNipun Gupta 					if (unlikely((*bufs)->ol_flags
1535*daa02b5cSOlivier Matz 						& RTE_MBUF_F_TX_VLAN)) {
153616c4a3c4SNipun Gupta 						ret = rte_vlan_insert(bufs);
153716c4a3c4SNipun Gupta 						if (ret)
153816c4a3c4SNipun Gupta 							goto send_n_return;
153916c4a3c4SNipun Gupta 					}
154016c4a3c4SNipun Gupta 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
154116c4a3c4SNipun Gupta 						&fd_arr[loop],
154216c4a3c4SNipun Gupta 						mempool_to_bpid(mp));
154316c4a3c4SNipun Gupta 					bufs++;
154416c4a3c4SNipun Gupta 					continue;
154516c4a3c4SNipun Gupta 				}
154616c4a3c4SNipun Gupta 			} else {
154716c4a3c4SNipun Gupta 				mi = rte_mbuf_from_indirect(*bufs);
154816c4a3c4SNipun Gupta 				mp = mi->pool;
154916c4a3c4SNipun Gupta 			}
155016c4a3c4SNipun Gupta 			/* Not a hw_pkt pool allocated frame */
155116c4a3c4SNipun Gupta 			if (unlikely(!mp || !priv->bp_list)) {
155216c4a3c4SNipun Gupta 				DPAA2_PMD_ERR("Err: No buffer pool attached");
155316c4a3c4SNipun Gupta 				goto send_n_return;
155416c4a3c4SNipun Gupta 			}
155516c4a3c4SNipun Gupta 
155616c4a3c4SNipun Gupta 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
155716c4a3c4SNipun Gupta 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
155816c4a3c4SNipun Gupta 				/* alloc should be from the default buffer pool
155916c4a3c4SNipun Gupta 				 * attached to this interface
156016c4a3c4SNipun Gupta 				 */
156116c4a3c4SNipun Gupta 				bpid = priv->bp_list->buf_pool.bpid;
156216c4a3c4SNipun Gupta 
156316c4a3c4SNipun Gupta 				if (unlikely((*bufs)->nb_segs > 1)) {
156416c4a3c4SNipun Gupta 					DPAA2_PMD_ERR(
156516c4a3c4SNipun Gupta 						"S/G not supp for non hw offload buffer");
156616c4a3c4SNipun Gupta 					goto send_n_return;
156716c4a3c4SNipun Gupta 				}
156816c4a3c4SNipun Gupta 				if (eth_copy_mbuf_to_fd(*bufs,
156916c4a3c4SNipun Gupta 							&fd_arr[loop], bpid)) {
157016c4a3c4SNipun Gupta 					goto send_n_return;
157116c4a3c4SNipun Gupta 				}
157216c4a3c4SNipun Gupta 				/* free the original packet */
157316c4a3c4SNipun Gupta 				rte_pktmbuf_free(*bufs);
157416c4a3c4SNipun Gupta 			} else {
157516c4a3c4SNipun Gupta 				bpid = mempool_to_bpid(mp);
157616c4a3c4SNipun Gupta 				if (unlikely((*bufs)->nb_segs > 1)) {
157716c4a3c4SNipun Gupta 					if (eth_mbuf_to_sg_fd(*bufs,
157816c4a3c4SNipun Gupta 							      &fd_arr[loop],
1579cc8569f0SHemant Agrawal 							      mp,
158016c4a3c4SNipun Gupta 							      bpid))
158116c4a3c4SNipun Gupta 						goto send_n_return;
158216c4a3c4SNipun Gupta 				} else {
158316c4a3c4SNipun Gupta 					eth_mbuf_to_fd(*bufs,
158416c4a3c4SNipun Gupta 						       &fd_arr[loop], bpid);
158516c4a3c4SNipun Gupta 				}
158616c4a3c4SNipun Gupta 			}
158716c4a3c4SNipun Gupta 			bufs++;
158816c4a3c4SNipun Gupta 		}
1589ce4fd609SNipun Gupta 
159016c4a3c4SNipun Gupta 		loop = 0;
1591ce4fd609SNipun Gupta 		retry_count = 0;
159216c4a3c4SNipun Gupta 		while (loop < frames_to_send) {
1593ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple_desc(swp,
159416c4a3c4SNipun Gupta 					&eqdesc[loop], &fd_arr[loop],
159516c4a3c4SNipun Gupta 					frames_to_send - loop);
1596ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1597ce4fd609SNipun Gupta 				retry_count++;
1598ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1599ce4fd609SNipun Gupta 					num_tx += loop;
1600ce4fd609SNipun Gupta 					nb_pkts -= loop;
1601ce4fd609SNipun Gupta 					goto send_n_return;
1602ce4fd609SNipun Gupta 				}
1603ce4fd609SNipun Gupta 			} else {
1604ce4fd609SNipun Gupta 				loop += ret;
1605ce4fd609SNipun Gupta 				retry_count = 0;
1606ce4fd609SNipun Gupta 			}
160716c4a3c4SNipun Gupta 		}
160816c4a3c4SNipun Gupta 
1609ce4fd609SNipun Gupta 		num_tx += loop;
1610ce4fd609SNipun Gupta 		nb_pkts -= loop;
161116c4a3c4SNipun Gupta 	}
161216c4a3c4SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
161316c4a3c4SNipun Gupta 	return num_tx;
161416c4a3c4SNipun Gupta 
161516c4a3c4SNipun Gupta send_n_return:
161616c4a3c4SNipun Gupta 	/* send any already prepared fd */
161716c4a3c4SNipun Gupta 	if (loop) {
161816c4a3c4SNipun Gupta 		unsigned int i = 0;
161916c4a3c4SNipun Gupta 
1620ce4fd609SNipun Gupta 		retry_count = 0;
162116c4a3c4SNipun Gupta 		while (i < loop) {
1622ce4fd609SNipun Gupta 			ret = qbman_swp_enqueue_multiple_desc(swp,
1623ce4fd609SNipun Gupta 				       &eqdesc[i], &fd_arr[i], loop - i);
1624ce4fd609SNipun Gupta 			if (unlikely(ret < 0)) {
1625ce4fd609SNipun Gupta 				retry_count++;
1626ce4fd609SNipun Gupta 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1627ce4fd609SNipun Gupta 					break;
1628ce4fd609SNipun Gupta 			} else {
1629ce4fd609SNipun Gupta 				i += ret;
1630ce4fd609SNipun Gupta 				retry_count = 0;
163116c4a3c4SNipun Gupta 			}
1632ce4fd609SNipun Gupta 		}
1633ce4fd609SNipun Gupta 		num_tx += i;
163416c4a3c4SNipun Gupta 	}
163516c4a3c4SNipun Gupta skip_tx:
163616c4a3c4SNipun Gupta 	dpaa2_q->tx_pkts += num_tx;
163716c4a3c4SNipun Gupta 	return num_tx;
163816c4a3c4SNipun Gupta }
163916c4a3c4SNipun Gupta 
1640a1f3a12cSHemant Agrawal /**
1641a1f3a12cSHemant Agrawal  * Dummy DPDK callback for TX.
1642a1f3a12cSHemant Agrawal  *
1643a1f3a12cSHemant Agrawal  * This function is used to temporarily replace the real callback during
1644a1f3a12cSHemant Agrawal  * unsafe control operations on the queue, or in case of error.
1645a1f3a12cSHemant Agrawal  *
1646a1f3a12cSHemant Agrawal  * @param dpdk_txq
1647a1f3a12cSHemant Agrawal  *   Generic pointer to TX queue structure.
1648a1f3a12cSHemant Agrawal  * @param[in] pkts
1649a1f3a12cSHemant Agrawal  *   Packets to transmit.
1650a1f3a12cSHemant Agrawal  * @param pkts_n
1651a1f3a12cSHemant Agrawal  *   Number of packets in array.
1652a1f3a12cSHemant Agrawal  *
1653a1f3a12cSHemant Agrawal  * @return
1654a1f3a12cSHemant Agrawal  *   Number of packets successfully transmitted (<= pkts_n).
1655a1f3a12cSHemant Agrawal  */
1656a1f3a12cSHemant Agrawal uint16_t
1657a1f3a12cSHemant Agrawal dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1658a1f3a12cSHemant Agrawal {
1659a1f3a12cSHemant Agrawal 	(void)queue;
1660a1f3a12cSHemant Agrawal 	(void)bufs;
1661a1f3a12cSHemant Agrawal 	(void)nb_pkts;
1662a1f3a12cSHemant Agrawal 	return 0;
1663a1f3a12cSHemant Agrawal }
1664a3a997f0SHemant Agrawal 
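/* qbman_result_DQ_fd() returns a const FD pointer which the loopback path
 * casts away before re-enqueueing, hence the cast-qual suppression below.
 */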
1665a3a997f0SHemant Agrawal #if defined(RTE_TOOLCHAIN_GCC)
1666a3a997f0SHemant Agrawal #pragma GCC diagnostic push
1667a3a997f0SHemant Agrawal #pragma GCC diagnostic ignored "-Wcast-qual"
1668a3a997f0SHemant Agrawal #elif defined(RTE_TOOLCHAIN_CLANG)
1669a3a997f0SHemant Agrawal #pragma clang diagnostic push
1670a3a997f0SHemant Agrawal #pragma clang diagnostic ignored "-Wcast-qual"
1671a3a997f0SHemant Agrawal #endif
1672a3a997f0SHemant Agrawal 
1673a3a997f0SHemant Agrawal /* This function loopbacks all the received packets.*/
1674a3a997f0SHemant Agrawal uint16_t
1675a3a997f0SHemant Agrawal dpaa2_dev_loopback_rx(void *queue,
1676a3a997f0SHemant Agrawal 		      struct rte_mbuf **bufs __rte_unused,
1677a3a997f0SHemant Agrawal 		      uint16_t nb_pkts)
1678a3a997f0SHemant Agrawal {
1679a3a997f0SHemant Agrawal 	/* Function to receive frames for a given device and VQ */
1680a3a997f0SHemant Agrawal 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1681a3a997f0SHemant Agrawal 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1682a3a997f0SHemant Agrawal 	uint32_t fqid = dpaa2_q->fqid;
1683a3a997f0SHemant Agrawal 	int ret, num_rx = 0, num_tx = 0, pull_size;
1684a3a997f0SHemant Agrawal 	uint8_t pending, status;
1685a3a997f0SHemant Agrawal 	struct qbman_swp *swp;
1686a3a997f0SHemant Agrawal 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1687a3a997f0SHemant Agrawal 	struct qbman_pull_desc pulldesc;
1688a3a997f0SHemant Agrawal 	struct qbman_eq_desc eqdesc;
1689a3a997f0SHemant Agrawal 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1690a3a997f0SHemant Agrawal 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1691a3a997f0SHemant Agrawal 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1692a3a997f0SHemant Agrawal 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1693a3a997f0SHemant Agrawal 	/* todo - currently we are using 1st TX queue only for loopback */
1694a3a997f0SHemant Agrawal 
1695a3a997f0SHemant Agrawal 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1696a3a997f0SHemant Agrawal 		ret = dpaa2_affine_qbman_ethrx_swp();
1697a3a997f0SHemant Agrawal 		if (ret) {
1698a3a997f0SHemant Agrawal 			DPAA2_PMD_ERR("Failure in affining portal");
1699a3a997f0SHemant Agrawal 			return 0;
1700a3a997f0SHemant Agrawal 		}
1701a3a997f0SHemant Agrawal 	}
1702a3a997f0SHemant Agrawal 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1703a3a997f0SHemant Agrawal 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1704a3a997f0SHemant Agrawal 	if (unlikely(!q_storage->active_dqs)) {
1705a3a997f0SHemant Agrawal 		q_storage->toggle = 0;
1706a3a997f0SHemant Agrawal 		dq_storage = q_storage->dq_storage[q_storage->toggle];
1707a3a997f0SHemant Agrawal 		q_storage->last_num_pkts = pull_size;
1708a3a997f0SHemant Agrawal 		qbman_pull_desc_clear(&pulldesc);
1709a3a997f0SHemant Agrawal 		qbman_pull_desc_set_numframes(&pulldesc,
1710a3a997f0SHemant Agrawal 					      q_storage->last_num_pkts);
1711a3a997f0SHemant Agrawal 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1712a3a997f0SHemant Agrawal 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1713a3a997f0SHemant Agrawal 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1714a3a997f0SHemant Agrawal 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1715a3a997f0SHemant Agrawal 			while (!qbman_check_command_complete(
1716a3a997f0SHemant Agrawal 			       get_swp_active_dqs(
1717a3a997f0SHemant Agrawal 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1718a3a997f0SHemant Agrawal 				;
1719a3a997f0SHemant Agrawal 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1720a3a997f0SHemant Agrawal 		}
1721a3a997f0SHemant Agrawal 		while (1) {
1722a3a997f0SHemant Agrawal 			if (qbman_swp_pull(swp, &pulldesc)) {
1723a3a997f0SHemant Agrawal 				DPAA2_PMD_DP_DEBUG(
1724a3a997f0SHemant Agrawal 					"VDQ command not issued. QBMAN busy\n");
1725a3a997f0SHemant Agrawal 				/* Portal was busy, try again */
1726a3a997f0SHemant Agrawal 				continue;
1727a3a997f0SHemant Agrawal 			}
1728a3a997f0SHemant Agrawal 			break;
1729a3a997f0SHemant Agrawal 		}
1730a3a997f0SHemant Agrawal 		q_storage->active_dqs = dq_storage;
1731a3a997f0SHemant Agrawal 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1732a3a997f0SHemant Agrawal 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1733a3a997f0SHemant Agrawal 				   dq_storage);
1734a3a997f0SHemant Agrawal 	}
1735a3a997f0SHemant Agrawal 
1736a3a997f0SHemant Agrawal 	dq_storage = q_storage->active_dqs;
1737a3a997f0SHemant Agrawal 	rte_prefetch0((void *)(size_t)(dq_storage));
1738a3a997f0SHemant Agrawal 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
1739a3a997f0SHemant Agrawal 
1740a3a997f0SHemant Agrawal 	/* Prepare next pull descriptor. This will give space for the
1741a3a997f0SHemant Agrawal 	 * prefetching done on DQRR entries
1742a3a997f0SHemant Agrawal 	 */
1743a3a997f0SHemant Agrawal 	q_storage->toggle ^= 1;
1744a3a997f0SHemant Agrawal 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1745a3a997f0SHemant Agrawal 	qbman_pull_desc_clear(&pulldesc);
1746a3a997f0SHemant Agrawal 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1747a3a997f0SHemant Agrawal 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1748a3a997f0SHemant Agrawal 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1749a3a997f0SHemant Agrawal 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1750a3a997f0SHemant Agrawal 
1751a3a997f0SHemant Agrawal 	/*Prepare enqueue descriptor*/
1752a3a997f0SHemant Agrawal 	qbman_eq_desc_clear(&eqdesc);
1753a3a997f0SHemant Agrawal 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1754a3a997f0SHemant Agrawal 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1755a3a997f0SHemant Agrawal 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1756a3a997f0SHemant Agrawal 
1757a3a997f0SHemant Agrawal 	/* Check if the previously issued command is completed.
1758a3a997f0SHemant Agrawal 	 * Note that the SWP may be shared between the Ethernet driver
1759a3a997f0SHemant Agrawal 	 * and the SEC driver.
1760a3a997f0SHemant Agrawal 	 */
1761a3a997f0SHemant Agrawal 	while (!qbman_check_command_complete(dq_storage))
1762a3a997f0SHemant Agrawal 		;
1763a3a997f0SHemant Agrawal 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1764a3a997f0SHemant Agrawal 		clear_swp_active_dqs(q_storage->active_dpio_id);
1765a3a997f0SHemant Agrawal 
1766a3a997f0SHemant Agrawal 	pending = 1;
1767a3a997f0SHemant Agrawal 
1768a3a997f0SHemant Agrawal 	do {
1769a3a997f0SHemant Agrawal 		/* Loop until the dq_storage is updated with
1770a3a997f0SHemant Agrawal 		 * a new token by QBMAN
1771a3a997f0SHemant Agrawal 		 */
1772a3a997f0SHemant Agrawal 		while (!qbman_check_new_result(dq_storage))
1773a3a997f0SHemant Agrawal 			;
1774a3a997f0SHemant Agrawal 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1775a3a997f0SHemant Agrawal 		/* Check whether the last pull command has expired
1776a3a997f0SHemant Agrawal 		 * and set the condition for loop termination
1777a3a997f0SHemant Agrawal 		 */
1778a3a997f0SHemant Agrawal 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1779a3a997f0SHemant Agrawal 			pending = 0;
1780a3a997f0SHemant Agrawal 			/* Check for valid frame. */
1781a3a997f0SHemant Agrawal 			status = qbman_result_DQ_flags(dq_storage);
1782a3a997f0SHemant Agrawal 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1783a3a997f0SHemant Agrawal 				continue;
1784a3a997f0SHemant Agrawal 		}
1785a3a997f0SHemant Agrawal 		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1786a3a997f0SHemant Agrawal 
1787a3a997f0SHemant Agrawal 		dq_storage++;
1788a3a997f0SHemant Agrawal 		num_rx++;
1789a3a997f0SHemant Agrawal 	} while (pending);
1790a3a997f0SHemant Agrawal 
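	/* Re-enqueue every dequeued frame, unmodified, on the first TX queue
	 * of the same port.
	 */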
1791a3a997f0SHemant Agrawal 	while (num_tx < num_rx) {
1792a3a997f0SHemant Agrawal 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1793a3a997f0SHemant Agrawal 				&fd[num_tx], 0, num_rx - num_tx);
1794a3a997f0SHemant Agrawal 	}
1795a3a997f0SHemant Agrawal 
1796a3a997f0SHemant Agrawal 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1797a3a997f0SHemant Agrawal 		while (!qbman_check_command_complete(
1798a3a997f0SHemant Agrawal 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1799a3a997f0SHemant Agrawal 			;
1800a3a997f0SHemant Agrawal 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1801a3a997f0SHemant Agrawal 	}
1802a3a997f0SHemant Agrawal 	/* issue a volatile dequeue command for next pull */
1803a3a997f0SHemant Agrawal 	while (1) {
1804a3a997f0SHemant Agrawal 		if (qbman_swp_pull(swp, &pulldesc)) {
1805a3a997f0SHemant Agrawal 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1806a3a997f0SHemant Agrawal 					  " QBMAN is busy (2)\n");
1807a3a997f0SHemant Agrawal 			continue;
1808a3a997f0SHemant Agrawal 		}
1809a3a997f0SHemant Agrawal 		break;
1810a3a997f0SHemant Agrawal 	}
1811a3a997f0SHemant Agrawal 	q_storage->active_dqs = dq_storage1;
1812a3a997f0SHemant Agrawal 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1813a3a997f0SHemant Agrawal 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1814a3a997f0SHemant Agrawal 
1815a3a997f0SHemant Agrawal 	dpaa2_q->rx_pkts += num_rx;
1816a3a997f0SHemant Agrawal 	dpaa2_q->tx_pkts += num_tx;
1817a3a997f0SHemant Agrawal 
1818a3a997f0SHemant Agrawal 	return 0;
1819a3a997f0SHemant Agrawal }
1820a3a997f0SHemant Agrawal #if defined(RTE_TOOLCHAIN_GCC)
1821a3a997f0SHemant Agrawal #pragma GCC diagnostic pop
1822a3a997f0SHemant Agrawal #elif defined(RTE_TOOLCHAIN_CLANG)
1823a3a997f0SHemant Agrawal #pragma clang diagnostic pop
1824a3a997f0SHemant Agrawal #endif
1825