xref: /dpdk/drivers/net/dpaa2/dpaa2_rxtx.c (revision 9ad3a41ab2a10db0059e1decdbf3ec038f348e08)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_dev.h>
17 #include <rte_hexdump.h>
18 
19 #include <rte_fslmc.h>
20 #include <fslmc_vfio.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_dpio.h>
23 #include <dpaa2_hw_mempool.h>
24 
25 #include "dpaa2_pmd_logs.h"
26 #include "dpaa2_ethdev.h"
27 #include "base/dpaa2_hw_dpni_annot.h"
28 
29 static inline uint32_t __rte_hot
30 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
31 			struct dpaa2_annot_hdr *annotation);
32 
33 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
34 
35 static inline rte_mbuf_timestamp_t *
36 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
37 {
38 	return RTE_MBUF_DYNFIELD(mbuf,
39 		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
40 }
41 
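/* Build a contiguous (single-buffer) frame descriptor from an mbuf:
 * program the buffer address, data length, buffer pool id and data
 * offset, and clear the FRC, CTRL and FLC fields.
 */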
42 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
43 	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
44 	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
45 	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
46 	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
47 	DPAA2_SET_FD_FRC(_fd, 0);		\
48 	DPAA2_RESET_FD_CTRL(_fd);		\
49 	DPAA2_RESET_FD_FLC(_fd);		\
50 } while (0)
51 
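/* Fast Rx parse used on LX2160A: the parse summary is carried in the
 * FRC field of the FD and mapped directly to an mbuf packet type;
 * frame types not covered by the FRC codes fall back to the slow
 * (annotation based) parser. The RSS hash and, when enabled, the Rx
 * timestamp are also filled in here.
 */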
52 static inline void __rte_hot
53 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
54 		       void *hw_annot_addr)
55 {
56 	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
57 	struct dpaa2_annot_hdr *annotation =
58 			(struct dpaa2_annot_hdr *)hw_annot_addr;
59 
60 	m->packet_type = RTE_PTYPE_UNKNOWN;
61 	switch (frc) {
62 	case DPAA2_PKT_TYPE_ETHER:
63 		m->packet_type = RTE_PTYPE_L2_ETHER;
64 		break;
65 	case DPAA2_PKT_TYPE_IPV4:
66 		m->packet_type = RTE_PTYPE_L2_ETHER |
67 			RTE_PTYPE_L3_IPV4;
68 		break;
69 	case DPAA2_PKT_TYPE_IPV6:
70 		m->packet_type = RTE_PTYPE_L2_ETHER |
71 			RTE_PTYPE_L3_IPV6;
72 		break;
73 	case DPAA2_PKT_TYPE_IPV4_EXT:
74 		m->packet_type = RTE_PTYPE_L2_ETHER |
75 			RTE_PTYPE_L3_IPV4_EXT;
76 		break;
77 	case DPAA2_PKT_TYPE_IPV6_EXT:
78 		m->packet_type = RTE_PTYPE_L2_ETHER |
79 			RTE_PTYPE_L3_IPV6_EXT;
80 		break;
81 	case DPAA2_PKT_TYPE_IPV4_TCP:
82 		m->packet_type = RTE_PTYPE_L2_ETHER |
83 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
84 		break;
85 	case DPAA2_PKT_TYPE_IPV6_TCP:
86 		m->packet_type = RTE_PTYPE_L2_ETHER |
87 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
88 		break;
89 	case DPAA2_PKT_TYPE_IPV4_UDP:
90 		m->packet_type = RTE_PTYPE_L2_ETHER |
91 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
92 		break;
93 	case DPAA2_PKT_TYPE_IPV6_UDP:
94 		m->packet_type = RTE_PTYPE_L2_ETHER |
95 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
96 		break;
97 	case DPAA2_PKT_TYPE_IPV4_SCTP:
98 		m->packet_type = RTE_PTYPE_L2_ETHER |
99 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
100 		break;
101 	case DPAA2_PKT_TYPE_IPV6_SCTP:
102 		m->packet_type = RTE_PTYPE_L2_ETHER |
103 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
104 		break;
105 	case DPAA2_PKT_TYPE_IPV4_ICMP:
106 		m->packet_type = RTE_PTYPE_L2_ETHER |
107 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
108 		break;
109 	case DPAA2_PKT_TYPE_IPV6_ICMP:
110 		m->packet_type = RTE_PTYPE_L2_ETHER |
111 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
112 		break;
113 	default:
114 		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
115 	}
116 	m->hash.rss = fd->simple.flc_hi;
117 	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
118 
119 	if (dpaa2_enable_ts[m->port]) {
120 		*dpaa2_timestamp_dynfield(m) = annotation->word2;
121 		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
122 		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
123 				*dpaa2_timestamp_dynfield(m));
124 	}
125 
126 	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
127 		"ol_flags =0x%" PRIx64 "",
128 		frc, m->packet_type, m->ol_flags);
129 }
130 
131 static inline uint32_t __rte_hot
132 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
133 			struct dpaa2_annot_hdr *annotation)
134 {
135 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
136 	uint16_t *vlan_tci;
137 
138 	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
139 			"(4)=0x%" PRIx64 "\t",
140 			annotation->word3, annotation->word4);
141 
142 #if defined(RTE_LIBRTE_IEEE1588)
143 	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
144 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
145 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
146 	}
147 #endif
148 
149 	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
150 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
151 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
152 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
153 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
154 		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
155 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
156 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
157 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
158 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
159 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
160 		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
161 	}
162 
163 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
164 		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
165 		goto parse_done;
166 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
167 		pkt_type |= RTE_PTYPE_L2_ETHER;
168 	} else {
169 		goto parse_done;
170 	}
171 
172 	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
173 				L2_MPLS_N_PRESENT))
174 		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
175 
176 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
177 			     L3_IPV4_N_PRESENT)) {
178 		pkt_type |= RTE_PTYPE_L3_IPV4;
179 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
180 			L3_IP_N_OPT_PRESENT))
181 			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
182 
183 	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
184 		  L3_IPV6_N_PRESENT)) {
185 		pkt_type |= RTE_PTYPE_L3_IPV6;
186 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
187 		    L3_IP_N_OPT_PRESENT))
188 			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
189 	} else {
190 		goto parse_done;
191 	}
192 
193 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
194 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
195 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
196 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
197 
198 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
199 	    L3_IP_1_MORE_FRAGMENT |
200 	    L3_IP_N_FIRST_FRAGMENT |
201 	    L3_IP_N_MORE_FRAGMENT)) {
202 		pkt_type |= RTE_PTYPE_L4_FRAG;
203 		goto parse_done;
204 	} else {
205 		pkt_type |= RTE_PTYPE_L4_NONFRAG;
206 	}
207 
208 	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
209 		pkt_type |= RTE_PTYPE_L4_UDP;
210 
211 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
212 		pkt_type |= RTE_PTYPE_L4_TCP;
213 
214 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
215 		pkt_type |= RTE_PTYPE_L4_SCTP;
216 
217 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
218 		pkt_type |= RTE_PTYPE_L4_ICMP;
219 
220 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
221 		pkt_type |= RTE_PTYPE_UNKNOWN;
222 
223 parse_done:
224 	return pkt_type;
225 }
226 
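/* Rx parse for non-LX2 platforms: inspect the hardware annotation to
 * flag L3/L4 checksum errors, record the Rx timestamp when enabled,
 * and return the packet type for the common IPv4/IPv6 TCP/UDP cases,
 * falling back to the slow parser for everything else.
 */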
227 static inline uint32_t __rte_hot
228 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
229 {
230 	struct dpaa2_annot_hdr *annotation =
231 			(struct dpaa2_annot_hdr *)hw_annot_addr;
232 
233 	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
234 			   annotation->word4);
235 
236 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
237 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
238 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
239 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
240 
241 	if (dpaa2_enable_ts[mbuf->port]) {
242 		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
243 		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
244 		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
245 				*dpaa2_timestamp_dynfield(mbuf));
246 	}
247 
248 	/* Check detailed parsing requirement */
249 	if (annotation->word3 & 0x7FFFFC3FFFF)
250 		return dpaa2_dev_rx_parse_slow(mbuf, annotation);
251 
252 	/* Return some common types from parse processing */
253 	switch (annotation->word4) {
254 	case DPAA2_L3_IPv4:
255 		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
256 	case DPAA2_L3_IPv6:
257 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
258 	case DPAA2_L3_IPv4_TCP:
259 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
260 				RTE_PTYPE_L4_TCP;
261 	case DPAA2_L3_IPv4_UDP:
262 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
263 				RTE_PTYPE_L4_UDP;
264 	case DPAA2_L3_IPv6_TCP:
265 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
266 				RTE_PTYPE_L4_TCP;
267 	case DPAA2_L3_IPv6_UDP:
268 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
269 				RTE_PTYPE_L4_UDP;
270 	default:
271 		break;
272 	}
273 
274 	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
275 }
276 
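/* Convert a scatter-gather FD into a chained mbuf: the first SG entry
 * becomes the head segment, subsequent entries are linked in until the
 * FINAL bit is seen, and the buffer that carried the SG table itself is
 * released afterwards.
 */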
277 static inline struct rte_mbuf *__rte_hot
278 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
279 		  int port_id)
280 {
281 	struct qbman_sge *sgt, *sge;
282 	size_t sg_addr, fd_addr;
283 	int i = 0;
284 	void *hw_annot_addr;
285 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
286 
287 	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
288 	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
289 
290 	/* Get Scatter gather table address */
291 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
292 
293 	sge = &sgt[i++];
294 	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
295 
296 	/* First Scatter gather entry */
297 	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
298 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
299 	/* Prepare all the metadata for first segment */
300 	first_seg->buf_addr = (uint8_t *)sg_addr;
301 	first_seg->ol_flags = 0;
302 	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
303 	first_seg->data_len = sge->length  & 0x1FFFF;
304 	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
305 	first_seg->nb_segs = 1;
306 	first_seg->next = NULL;
307 	first_seg->port = port_id;
308 	if (dpaa2_svr_family == SVR_LX2160A)
309 		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
310 	else
311 		first_seg->packet_type =
312 			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
313 
314 	rte_mbuf_refcnt_set(first_seg, 1);
315 	cur_seg = first_seg;
316 	while (!DPAA2_SG_IS_FINAL(sge)) {
317 		sge = &sgt[i++];
318 		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
319 				DPAA2_GET_FLE_ADDR(sge));
320 		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
321 			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
322 		next_seg->buf_addr  = (uint8_t *)sg_addr;
323 		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
324 		next_seg->data_len  = sge->length  & 0x1FFFF;
325 		first_seg->nb_segs += 1;
326 		rte_mbuf_refcnt_set(next_seg, 1);
327 		cur_seg->next = next_seg;
328 		next_seg->next = NULL;
329 		cur_seg = next_seg;
330 	}
331 	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
332 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
333 	rte_mbuf_refcnt_set(temp, 1);
334 	rte_pktmbuf_free_seg(temp);
335 
336 	return (void *)first_seg;
337 }
338 
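/* Convert a contiguous FD into a single-segment mbuf, restoring the
 * mbuf fields that may have been overwritten by a previous transmission
 * and running the Rx parser on the hardware annotation.
 */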
339 static inline struct rte_mbuf *__rte_hot
340 eth_fd_to_mbuf(const struct qbman_fd *fd,
341 	       int port_id)
342 {
343 	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
344 	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
345 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
346 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
347 
348 	/* need to repopulate some of the fields,
349 	 * as they may have changed during the last transmission
350 	 */
351 	mbuf->nb_segs = 1;
352 	mbuf->ol_flags = 0;
353 	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
354 	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
355 	mbuf->pkt_len = mbuf->data_len;
356 	mbuf->port = port_id;
357 	mbuf->next = NULL;
358 	rte_mbuf_refcnt_set(mbuf, 1);
359 
360 	/* Parse the packet */
361 	/* Parse results for LX2 are in the FRC field of the FD.
362 	 * For other DPAA2 platforms, parse results are placed after
363 	 * the private SW annotation area.
364 	 */
365 
366 	if (dpaa2_svr_family == SVR_LX2160A)
367 		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
368 	else
369 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
370 
371 	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
372 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
373 		mbuf, mbuf->buf_addr, mbuf->data_off,
374 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
375 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
376 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
377 
378 	return mbuf;
379 }
380 
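/* Build a scatter-gather FD from a (possibly multi-segment) mbuf. The
 * SG table is placed inline in the first buffer when enough headroom is
 * available, otherwise a separate buffer is allocated from the given
 * mempool. Each FLE carries the segment's buffer pool id, or is marked
 * invalid (IVP) when the hardware must not free that buffer.
 */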
381 static int __rte_noinline __rte_hot
382 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
383 		  struct qbman_fd *fd,
384 		  struct rte_mempool *mp, uint16_t bpid)
385 {
386 	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
387 	struct qbman_sge *sgt, *sge = NULL;
388 	int i, offset = 0;
389 
390 #ifdef RTE_LIBRTE_IEEE1588
391 	/* annotation area for timestamp in first buffer */
392 	offset = 0x64;
393 #endif
394 	if (RTE_MBUF_DIRECT(mbuf) &&
395 		(mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
396 		+ offset))) {
397 		temp = mbuf;
398 		if (rte_mbuf_refcnt_read(temp) > 1) {
399 			/* If refcnt > 1, invalid bpid is set to ensure
400 			 * buffer is not freed by HW
401 			 */
402 			fd->simple.bpid_offset = 0;
403 			DPAA2_SET_FD_IVP(fd);
404 			rte_mbuf_refcnt_update(temp, -1);
405 		} else {
406 			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
407 		}
408 		DPAA2_SET_FD_OFFSET(fd, offset);
409 	} else {
410 		temp = rte_pktmbuf_alloc(mp);
411 		if (temp == NULL) {
412 			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
413 			return -ENOMEM;
414 		}
415 		DPAA2_SET_ONLY_FD_BPID(fd, bpid);
416 		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
417 	}
418 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
419 	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
420 	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
421 	DPAA2_RESET_FD_FRC(fd);
422 	DPAA2_RESET_FD_CTRL(fd);
423 	DPAA2_RESET_FD_FLC(fd);
424 	/*Set Scatter gather table and Scatter gather entries*/
425 	sgt = (struct qbman_sge *)(
426 			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
427 			+ DPAA2_GET_FD_OFFSET(fd));
428 
429 	for (i = 0; i < mbuf->nb_segs; i++) {
430 		sge = &sgt[i];
431 		/*Resetting the buffer pool id and offset field*/
432 		sge->fin_bpid_offset = 0;
433 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
434 		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
435 		sge->length = cur_seg->data_len;
436 		if (RTE_MBUF_DIRECT(cur_seg)) {
437 			/* if we are using inline SGT in same buffers
438 			 * set the FLE FMT as Frame Data Section
439 			 */
440 			if (temp == cur_seg) {
441 				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
442 				DPAA2_SET_FLE_IVP(sge);
443 			} else {
444 				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
445 				/* If refcnt > 1, invalid bpid is set to ensure
446 				 * buffer is not freed by HW
447 				 */
448 					DPAA2_SET_FLE_IVP(sge);
449 					rte_mbuf_refcnt_update(cur_seg, -1);
450 				} else {
451 					DPAA2_SET_FLE_BPID(sge,
452 						mempool_to_bpid(cur_seg->pool));
453 				}
454 			}
455 			cur_seg = cur_seg->next;
456 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
457 			DPAA2_SET_FLE_IVP(sge);
458 			cur_seg = cur_seg->next;
459 		} else {
460 			/* Get owner MBUF from indirect buffer */
461 			mi = rte_mbuf_from_indirect(cur_seg);
462 			if (rte_mbuf_refcnt_read(mi) > 1) {
463 				/* If refcnt > 1, invalid bpid is set to ensure
464 				 * owner buffer is not freed by HW
465 				 */
466 				DPAA2_SET_FLE_IVP(sge);
467 			} else {
468 				DPAA2_SET_FLE_BPID(sge,
469 						   mempool_to_bpid(mi->pool));
470 				rte_mbuf_refcnt_update(mi, 1);
471 			}
472 			prev_seg = cur_seg;
473 			cur_seg = cur_seg->next;
474 			prev_seg->next = NULL;
475 			rte_pktmbuf_free(prev_seg);
476 		}
477 	}
478 	DPAA2_SG_SET_FINAL(sge, true);
479 	return 0;
480 }
481 
482 static void
483 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
484 	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;
485 
486 static void __rte_noinline __rte_hot
487 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
488 	       struct qbman_fd *fd, uint16_t bpid)
489 {
490 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
491 
492 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
493 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
494 		mbuf, mbuf->buf_addr, mbuf->data_off,
495 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
496 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
497 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
498 	if (RTE_MBUF_DIRECT(mbuf)) {
499 		if (rte_mbuf_refcnt_read(mbuf) > 1) {
500 			DPAA2_SET_FD_IVP(fd);
501 			rte_mbuf_refcnt_update(mbuf, -1);
502 		}
503 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
504 		DPAA2_SET_FD_IVP(fd);
505 	} else {
506 		struct rte_mbuf *mi;
507 
508 		mi = rte_mbuf_from_indirect(mbuf);
509 		if (rte_mbuf_refcnt_read(mi) > 1)
510 			DPAA2_SET_FD_IVP(fd);
511 		else
512 			rte_mbuf_refcnt_update(mi, 1);
513 		rte_pktmbuf_free(mbuf);
514 	}
515 }
516 
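/* Fallback for mbufs that do not come from a DPAA2 backed pool: copy
 * the packet data and the relevant metadata into a buffer allocated
 * from the hardware pool and build a contiguous FD from the copy.
 */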
517 static inline int __rte_hot
518 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
519 		    struct qbman_fd *fd, uint16_t bpid)
520 {
521 	struct rte_mbuf *m;
522 	void *mb = NULL;
523 
524 	if (rte_dpaa2_mbuf_alloc_bulk(
525 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
526 		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
527 		return -1;
528 	}
529 	m = (struct rte_mbuf *)mb;
530 	memcpy((char *)m->buf_addr + mbuf->data_off,
531 	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
532 		mbuf->pkt_len);
533 
534 	/* Copy required fields */
535 	m->data_off = mbuf->data_off;
536 	m->ol_flags = mbuf->ol_flags;
537 	m->packet_type = mbuf->packet_type;
538 	m->tx_offload = mbuf->tx_offload;
539 
540 	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
541 
542 	DPAA2_PMD_DP_DEBUG(
543 		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
544 		" meta: %d, off: %d, len: %d\n",
545 		(void *)mbuf,
546 		mbuf->buf_addr,
547 		DPAA2_GET_FD_ADDR(fd),
548 		DPAA2_GET_FD_BPID(fd),
549 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
550 		DPAA2_GET_FD_OFFSET(fd),
551 		DPAA2_GET_FD_LEN(fd));
552 
553 	return 0;
554 }
555 
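/* Drain the Rx error queue and dump each faulty frame (FD fields,
 * frame annotation status and a hexdump of the frame) for debugging.
 * Called from the Rx paths when the error queue is enabled.
 */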
556 static void
557 dump_err_pkts(struct dpaa2_queue *dpaa2_q)
558 {
559 	/* Function to receive error frames for a given device and VQ */
560 	struct qbman_result *dq_storage;
561 	uint32_t fqid = dpaa2_q->fqid;
562 	int ret, num_rx = 0, num_pulled;
563 	uint8_t pending, status;
564 	struct qbman_swp *swp;
565 	const struct qbman_fd *fd;
566 	struct qbman_pull_desc pulldesc;
567 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
568 	uint32_t lcore_id = rte_lcore_id();
569 	void *v_addr, *hw_annot_addr;
570 	struct dpaa2_fas *fas;
571 
572 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
573 		ret = dpaa2_affine_qbman_swp();
574 		if (ret) {
575 			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
576 				rte_gettid());
577 			return;
578 		}
579 	}
580 	swp = DPAA2_PER_LCORE_PORTAL;
581 
582 	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
583 	qbman_pull_desc_clear(&pulldesc);
584 	qbman_pull_desc_set_fq(&pulldesc, fqid);
585 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
586 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
587 	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
588 
589 	while (1) {
590 		if (qbman_swp_pull(swp, &pulldesc)) {
591 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy\n");
592 			/* Portal was busy, try again */
593 			continue;
594 		}
595 		break;
596 	}
597 
598 	/* Check if the previously issued command is completed. */
599 	while (!qbman_check_command_complete(dq_storage))
600 		;
601 
602 	num_pulled = 0;
603 	pending = 1;
604 	do {
605 		/* Loop until the dq_storage is updated with
606 		 * new token by QBMAN
607 		 */
608 		while (!qbman_check_new_result(dq_storage))
609 			;
610 
611 		/* Check whether the last pull command has expired and,
612 		 * if so, set the condition for loop termination
613 		 */
614 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
615 			pending = 0;
616 			/* Check for valid frame. */
617 			status = qbman_result_DQ_flags(dq_storage);
618 			if (unlikely((status &
619 				QBMAN_DQ_STAT_VALIDFRAME) == 0))
620 				continue;
621 		}
622 		fd = qbman_result_DQ_fd(dq_storage);
623 		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
624 		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
625 		fas = hw_annot_addr;
626 
627 		DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
628 			" fd_off: %d, fd_err: %x, fas_status: %x",
629 			rte_lcore_id(), eth_data->port_id,
630 			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
631 			fas->status);
632 		rte_hexdump(stderr, "Error packet", v_addr,
633 			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
634 
635 		dq_storage++;
636 		num_rx++;
637 		num_pulled++;
638 	} while (pending);
639 
640 	dpaa2_q->err_pkts += num_rx;
641 }
642 
643 /* This function assumes that the caller keeps the same value of nb_pkts
644  * across calls for a given queue; if that is not the case, it is better to
645  * use the non-prefetch version of the Rx call.
646  * It returns the packets as requested in the previous call, without honoring
647  * the current nb_pkts or the available space in bufs.
648  */
649 uint16_t
650 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
651 {
652 	/* Function to receive frames for a given device and VQ */
653 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
654 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
655 	uint32_t fqid = dpaa2_q->fqid;
656 	int ret, num_rx = 0, pull_size;
657 	uint8_t pending, status;
658 	struct qbman_swp *swp;
659 	const struct qbman_fd *fd;
660 	struct qbman_pull_desc pulldesc;
661 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
662 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
663 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
664 
665 	if (unlikely(dpaa2_enable_err_queue))
666 		dump_err_pkts(priv->rx_err_vq);
667 
668 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
669 		ret = dpaa2_affine_qbman_ethrx_swp();
670 		if (ret) {
671 			DPAA2_PMD_ERR("Failure in affining portal");
672 			return 0;
673 		}
674 	}
675 
676 	if (unlikely(!rte_dpaa2_bpid_info &&
677 		     rte_eal_process_type() == RTE_PROC_SECONDARY))
678 		rte_dpaa2_bpid_info = dpaa2_q->bp_array;
679 
680 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
681 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
682 	if (unlikely(!q_storage->active_dqs)) {
683 		q_storage->toggle = 0;
684 		dq_storage = q_storage->dq_storage[q_storage->toggle];
685 		q_storage->last_num_pkts = pull_size;
686 		qbman_pull_desc_clear(&pulldesc);
687 		qbman_pull_desc_set_numframes(&pulldesc,
688 					      q_storage->last_num_pkts);
689 		qbman_pull_desc_set_fq(&pulldesc, fqid);
690 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
691 			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
692 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
693 			while (!qbman_check_command_complete(
694 			       get_swp_active_dqs(
695 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
696 				;
697 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
698 		}
699 		while (1) {
700 			if (qbman_swp_pull(swp, &pulldesc)) {
701 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
702 						  " QBMAN is busy (1)\n");
703 				/* Portal was busy, try again */
704 				continue;
705 			}
706 			break;
707 		}
708 		q_storage->active_dqs = dq_storage;
709 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
710 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
711 				   dq_storage);
712 	}
713 
714 	dq_storage = q_storage->active_dqs;
715 	rte_prefetch0((void *)(size_t)(dq_storage));
716 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
717 
718 	/* Prepare next pull descriptor. This will give space for the
719 	 * prefetching done on DQRR entries
720 	 */
721 	q_storage->toggle ^= 1;
722 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
723 	qbman_pull_desc_clear(&pulldesc);
724 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
725 	qbman_pull_desc_set_fq(&pulldesc, fqid);
726 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
727 		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
728 
729 	/* Check if the previously issued command is completed.
730 	 * Note that the SWP appears to be shared between the Ethernet
731 	 * driver and the SEC driver.
732 	 */
733 	while (!qbman_check_command_complete(dq_storage))
734 		;
735 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
736 		clear_swp_active_dqs(q_storage->active_dpio_id);
737 
738 	pending = 1;
739 
740 	do {
741 		/* Loop until the dq_storage is updated with
742 		 * new token by QBMAN
743 		 */
744 		while (!qbman_check_new_result(dq_storage))
745 			;
746 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
747 		/* Check whether the last pull command has expired and,
748 		 * if so, set the condition for loop termination
749 		 */
750 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
751 			pending = 0;
752 			/* Check for valid frame. */
753 			status = qbman_result_DQ_flags(dq_storage);
754 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
755 				continue;
756 		}
757 		fd = qbman_result_DQ_fd(dq_storage);
758 
759 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
760 		if (dpaa2_svr_family != SVR_LX2160A) {
761 			const struct qbman_fd *next_fd =
762 				qbman_result_DQ_fd(dq_storage + 1);
763 			/* Prefetch Annotation address for the parse results */
764 			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
765 				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
766 		}
767 #endif
768 
769 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
770 			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
771 		else
772 			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
773 #if defined(RTE_LIBRTE_IEEE1588)
774 		if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
775 			priv->rx_timestamp =
776 				*dpaa2_timestamp_dynfield(bufs[num_rx]);
777 		}
778 #endif
779 
780 		if (eth_data->dev_conf.rxmode.offloads &
781 				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
782 			rte_vlan_strip(bufs[num_rx]);
783 
784 		dq_storage++;
785 		num_rx++;
786 	} while (pending);
787 
788 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
789 		while (!qbman_check_command_complete(
790 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
791 			;
792 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
793 	}
794 	/* issue a volatile dequeue command for next pull */
795 	while (1) {
796 		if (qbman_swp_pull(swp, &pulldesc)) {
797 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
798 					  " QBMAN is busy (2)\n");
799 			continue;
800 		}
801 		break;
802 	}
803 	q_storage->active_dqs = dq_storage1;
804 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
805 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
806 
807 	dpaa2_q->rx_pkts += num_rx;
808 
809 	return num_rx;
810 }
811 
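/* Eventdev Rx callbacks: translate a dequeued FD into an rte_event.
 * The parallel variant consumes the DQRR entry immediately, the atomic
 * variant keeps it held (for DCA on the subsequent enqueue) and the
 * ordered variant records the ODP id and sequence number so that order
 * can be restored on transmit.
 */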
812 void __rte_hot
813 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
814 				 const struct qbman_fd *fd,
815 				 const struct qbman_result *dq,
816 				 struct dpaa2_queue *rxq,
817 				 struct rte_event *ev)
818 {
819 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
820 		DPAA2_FD_PTA_SIZE + 16));
821 
822 	ev->flow_id = rxq->ev.flow_id;
823 	ev->sub_event_type = rxq->ev.sub_event_type;
824 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
825 	ev->op = RTE_EVENT_OP_NEW;
826 	ev->sched_type = rxq->ev.sched_type;
827 	ev->queue_id = rxq->ev.queue_id;
828 	ev->priority = rxq->ev.priority;
829 
830 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
831 
832 	qbman_swp_dqrr_consume(swp, dq);
833 }
834 
835 void __rte_hot
836 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
837 			       const struct qbman_fd *fd,
838 			       const struct qbman_result *dq,
839 			       struct dpaa2_queue *rxq,
840 			       struct rte_event *ev)
841 {
842 	uint8_t dqrr_index;
843 
844 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
845 		DPAA2_FD_PTA_SIZE + 16));
846 
847 	ev->flow_id = rxq->ev.flow_id;
848 	ev->sub_event_type = rxq->ev.sub_event_type;
849 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
850 	ev->op = RTE_EVENT_OP_NEW;
851 	ev->sched_type = rxq->ev.sched_type;
852 	ev->queue_id = rxq->ev.queue_id;
853 	ev->priority = rxq->ev.priority;
854 
855 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
856 
857 	dqrr_index = qbman_get_dqrr_idx(dq);
858 	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
859 	DPAA2_PER_LCORE_DQRR_SIZE++;
860 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
861 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
862 }
863 
864 void __rte_hot
865 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
866 				const struct qbman_fd *fd,
867 				const struct qbman_result *dq,
868 				struct dpaa2_queue *rxq,
869 				struct rte_event *ev)
870 {
871 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
872 		DPAA2_FD_PTA_SIZE + 16));
873 
874 	ev->flow_id = rxq->ev.flow_id;
875 	ev->sub_event_type = rxq->ev.sub_event_type;
876 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
877 	ev->op = RTE_EVENT_OP_NEW;
878 	ev->sched_type = rxq->ev.sched_type;
879 	ev->queue_id = rxq->ev.queue_id;
880 	ev->priority = rxq->ev.priority;
881 
882 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
883 
884 	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
885 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
886 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
887 
888 	qbman_swp_dqrr_consume(swp, dq);
889 }
890 
891 uint16_t
892 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
893 {
894 	/* Function to receive frames for a given device and VQ */
895 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
896 	struct qbman_result *dq_storage;
897 	uint32_t fqid = dpaa2_q->fqid;
898 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
899 	uint8_t pending, status;
900 	struct qbman_swp *swp;
901 	const struct qbman_fd *fd;
902 	struct qbman_pull_desc pulldesc;
903 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
904 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
905 
906 	if (unlikely(dpaa2_enable_err_queue))
907 		dump_err_pkts(priv->rx_err_vq);
908 
909 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
910 		ret = dpaa2_affine_qbman_swp();
911 		if (ret) {
912 			DPAA2_PMD_ERR(
913 				"Failed to allocate IO portal, tid: %d\n",
914 				rte_gettid());
915 			return 0;
916 		}
917 	}
918 	swp = DPAA2_PER_LCORE_PORTAL;
919 
920 	do {
921 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
922 		qbman_pull_desc_clear(&pulldesc);
923 		qbman_pull_desc_set_fq(&pulldesc, fqid);
924 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
925 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
926 
927 		if (next_pull > dpaa2_dqrr_size) {
928 			qbman_pull_desc_set_numframes(&pulldesc,
929 				dpaa2_dqrr_size);
930 			next_pull -= dpaa2_dqrr_size;
931 		} else {
932 			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
933 			next_pull = 0;
934 		}
935 
936 		while (1) {
937 			if (qbman_swp_pull(swp, &pulldesc)) {
938 				DPAA2_PMD_DP_DEBUG(
939 					"VDQ command is not issued. QBMAN is busy\n");
940 				/* Portal was busy, try again */
941 				continue;
942 			}
943 			break;
944 		}
945 
946 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
947 		/* Check if the previously issued command is completed. */
948 		while (!qbman_check_command_complete(dq_storage))
949 			;
950 
951 		num_pulled = 0;
952 		pending = 1;
953 		do {
954 			/* Loop until the dq_storage is updated with
955 			 * new token by QBMAN
956 			 */
957 			while (!qbman_check_new_result(dq_storage))
958 				;
959 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
960 			/* Check whether the last pull command has expired and,
961 			 * if so, set the condition for loop termination
962 			 */
963 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
964 				pending = 0;
965 				/* Check for valid frame. */
966 				status = qbman_result_DQ_flags(dq_storage);
967 				if (unlikely((status &
968 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
969 					continue;
970 			}
971 			fd = qbman_result_DQ_fd(dq_storage);
972 
973 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
974 			if (dpaa2_svr_family != SVR_LX2160A) {
975 				const struct qbman_fd *next_fd =
976 					qbman_result_DQ_fd(dq_storage + 1);
977 
978 				/* Prefetch Annotation address for the parse
979 				 * results.
980 				 */
981 				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
982 					DPAA2_GET_FD_ADDR(next_fd) +
983 					DPAA2_FD_PTA_SIZE + 16)));
984 			}
985 #endif
986 
987 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
988 				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
989 							eth_data->port_id);
990 			else
991 				bufs[num_rx] = eth_fd_to_mbuf(fd,
992 							eth_data->port_id);
993 
994 #if defined(RTE_LIBRTE_IEEE1588)
995 			if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
996 				priv->rx_timestamp =
997 					*dpaa2_timestamp_dynfield(bufs[num_rx]);
998 			}
999 #endif
1000 
1001 			if (eth_data->dev_conf.rxmode.offloads &
1002 					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1003 				rte_vlan_strip(bufs[num_rx]);
1004 			}
1005 
1006 			dq_storage++;
1007 			num_rx++;
1008 			num_pulled++;
1009 		} while (pending);
1010 	/* Last VDQ provided all packets and more packets are requested */
1011 	} while (next_pull && num_pulled == dpaa2_dqrr_size);
1012 
1013 	dpaa2_q->rx_pkts += num_rx;
1014 
1015 	return num_rx;
1016 }
1017 
1018 uint16_t dpaa2_dev_tx_conf(void *queue)
1019 {
1020 	/* Function to receive Tx confirmation frames for a given device and VQ */
1021 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1022 	struct qbman_result *dq_storage;
1023 	uint32_t fqid = dpaa2_q->fqid;
1024 	int ret, num_tx_conf = 0, num_pulled;
1025 	uint8_t pending, status;
1026 	struct qbman_swp *swp;
1027 	const struct qbman_fd *fd, *next_fd;
1028 	struct qbman_pull_desc pulldesc;
1029 	struct qbman_release_desc releasedesc;
1030 	uint32_t bpid;
1031 	uint64_t buf;
1032 #if defined(RTE_LIBRTE_IEEE1588)
1033 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1034 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1035 	struct dpaa2_annot_hdr *annotation;
1036 	void *v_addr;
1037 	struct rte_mbuf *mbuf;
1038 #endif
1039 
1040 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1041 		ret = dpaa2_affine_qbman_swp();
1042 		if (ret) {
1043 			DPAA2_PMD_ERR(
1044 				"Failed to allocate IO portal, tid: %d\n",
1045 				rte_gettid());
1046 			return 0;
1047 		}
1048 	}
1049 	swp = DPAA2_PER_LCORE_PORTAL;
1050 
1051 	do {
1052 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
1053 		qbman_pull_desc_clear(&pulldesc);
1054 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1055 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1056 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1057 
1058 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
1059 
1060 		while (1) {
1061 			if (qbman_swp_pull(swp, &pulldesc)) {
1062 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1063 						   " QBMAN is busy\n");
1064 				/* Portal was busy, try again */
1065 				continue;
1066 			}
1067 			break;
1068 		}
1069 
1070 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1071 		/* Check if the previously issued command is completed. */
1072 		while (!qbman_check_command_complete(dq_storage))
1073 			;
1074 
1075 		num_pulled = 0;
1076 		pending = 1;
1077 		do {
1078 			/* Loop until the dq_storage is updated with
1079 			 * new token by QBMAN
1080 			 */
1081 			while (!qbman_check_new_result(dq_storage))
1082 				;
1083 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1084 			/* Check whether the last pull command has expired and,
1085 			 * if so, set the condition for loop termination
1086 			 */
1087 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1088 				pending = 0;
1089 				/* Check for valid frame. */
1090 				status = qbman_result_DQ_flags(dq_storage);
1091 				if (unlikely((status &
1092 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
1093 					continue;
1094 			}
1095 			fd = qbman_result_DQ_fd(dq_storage);
1096 
1097 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
1098 			/* Prefetch Annotation address for the parse results */
1099 			rte_prefetch0((void *)(size_t)
1100 				(DPAA2_GET_FD_ADDR(next_fd) +
1101 				 DPAA2_FD_PTA_SIZE + 16));
1102 
1103 			bpid = DPAA2_GET_FD_BPID(fd);
1104 
1105 			/* Create a release descriptor required for releasing
1106 			 * buffers into QBMAN
1107 			 */
1108 			qbman_release_desc_clear(&releasedesc);
1109 			qbman_release_desc_set_bpid(&releasedesc, bpid);
1110 
1111 			buf = DPAA2_GET_FD_ADDR(fd);
1112 			/* feed them to bman */
1113 			do {
1114 				ret = qbman_swp_release(swp, &releasedesc,
1115 							&buf, 1);
1116 			} while (ret == -EBUSY);
1117 
1118 			dq_storage++;
1119 			num_tx_conf++;
1120 			num_pulled++;
1121 #if defined(RTE_LIBRTE_IEEE1588)
1122 			v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1123 			mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
1124 				rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1125 
1126 			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1127 				annotation = (struct dpaa2_annot_hdr *)((size_t)
1128 					DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1129 					DPAA2_FD_PTA_SIZE);
1130 				priv->tx_timestamp = annotation->word2;
1131 			}
1132 #endif
1133 		} while (pending);
1134 
1135 	/* Last VDQ provided all packets and more packets are requested */
1136 	} while (num_pulled == dpaa2_dqrr_size);
1137 
1138 	dpaa2_q->rx_pkts += num_tx_conf;
1139 
1140 	return num_tx_conf;
1141 }
1142 
1143 /* Configure the egress frame annotation for timestamp update */
1144 static void enable_tx_tstamp(struct qbman_fd *fd)
1145 {
1146 	struct dpaa2_faead *fd_faead;
1147 
1148 	/* Set frame annotation status field as valid */
1149 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1150 
1151 	/* Set frame annotation egress action descriptor as valid */
1152 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1153 
1154 	/* Set Annotation Length as 128B */
1155 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1156 
1157 	/* enable update of confirmation frame annotation */
1158 	fd_faead = (struct dpaa2_faead *)((size_t)
1159 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1160 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1161 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1162 				DPAA2_ANNOT_FAEAD_UPD;
1163 }
1164 
1165 /*
1166  * Callback to handle sending packets through WRIOP based interface
1167  */
1168 uint16_t
1169 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1170 {
1171 	/* Function to transmit the frames to the given device and VQ */
1172 	uint32_t loop, retry_count;
1173 	int32_t ret;
1174 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1175 	struct rte_mbuf *mi;
1176 	uint32_t frames_to_send;
1177 	struct rte_mempool *mp;
1178 	struct qbman_eq_desc eqdesc;
1179 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1180 	struct qbman_swp *swp;
1181 	uint16_t num_tx = 0;
1182 	uint16_t bpid;
1183 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1184 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1185 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1186 	struct rte_mbuf **orig_bufs = bufs;
1187 
1188 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1189 		ret = dpaa2_affine_qbman_swp();
1190 		if (ret) {
1191 			DPAA2_PMD_ERR(
1192 				"Failed to allocate IO portal, tid: %d\n",
1193 				rte_gettid());
1194 			return 0;
1195 		}
1196 	}
1197 	swp = DPAA2_PER_LCORE_PORTAL;
1198 
1199 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1200 			eth_data, dpaa2_q->fqid);
1201 
1202 #ifdef RTE_LIBRTE_IEEE1588
1203 	/* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1204 	 * corresponding to the last transmitted packet, in order to read
1205 	 * the timestamp
1206 	 */
1207 	if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1208 		priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1209 		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1210 		priv->tx_timestamp = 0;
1211 	}
1212 #endif
1213 
1214 	/*Prepare enqueue descriptor*/
1215 	qbman_eq_desc_clear(&eqdesc);
1216 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1217 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1218 
1219 	/*Clear the unused FD fields before sending*/
1220 	while (nb_pkts) {
1221 		/*Check if the queue is congested*/
1222 		retry_count = 0;
1223 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1224 			retry_count++;
1225 			/* Retry for some time before giving up */
1226 			if (retry_count > CONG_RETRY_COUNT)
1227 				goto skip_tx;
1228 		}
1229 
1230 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1231 			dpaa2_eqcr_size : nb_pkts;
1232 
1233 		for (loop = 0; loop < frames_to_send; loop++) {
1234 			if (*dpaa2_seqn(*bufs)) {
1235 				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1236 
1237 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1238 						dqrr_index;
1239 				DPAA2_PER_LCORE_DQRR_SIZE--;
1240 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1241 				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1242 			}
1243 
1244 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1245 				mp = (*bufs)->pool;
1246 				/* Check the basic scenario and set
1247 				 * the FD appropriately here itself.
1248 				 */
1249 				if (likely(mp && mp->ops_index ==
1250 				    priv->bp_list->dpaa2_ops_index &&
1251 				    (*bufs)->nb_segs == 1 &&
1252 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1253 					if (unlikely(((*bufs)->ol_flags
1254 						& RTE_MBUF_F_TX_VLAN) ||
1255 						(eth_data->dev_conf.txmode.offloads
1256 						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1257 						ret = rte_vlan_insert(bufs);
1258 						if (ret)
1259 							goto send_n_return;
1260 					}
1261 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1262 					&fd_arr[loop], mempool_to_bpid(mp));
1263 					bufs++;
1264 #ifdef RTE_LIBRTE_IEEE1588
1265 					enable_tx_tstamp(&fd_arr[loop]);
1266 #endif
1267 					continue;
1268 				}
1269 			} else {
1270 				mi = rte_mbuf_from_indirect(*bufs);
1271 				mp = mi->pool;
1272 			}
1273 
1274 			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1275 				if (unlikely((*bufs)->nb_segs > 1)) {
1276 					if (eth_mbuf_to_sg_fd(*bufs,
1277 							      &fd_arr[loop],
1278 							      mp, 0))
1279 						goto send_n_return;
1280 				} else {
1281 					eth_mbuf_to_fd(*bufs,
1282 						       &fd_arr[loop], 0);
1283 				}
1284 				bufs++;
1285 #ifdef RTE_LIBRTE_IEEE1588
1286 				enable_tx_tstamp(&fd_arr[loop]);
1287 #endif
1288 				continue;
1289 			}
1290 
1291 			/* Not a hw_pkt pool allocated frame */
1292 			if (unlikely(!mp || !priv->bp_list)) {
1293 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1294 				goto send_n_return;
1295 			}
1296 
1297 			if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
1298 				(eth_data->dev_conf.txmode.offloads
1299 				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1300 				int ret = rte_vlan_insert(bufs);
1301 				if (ret)
1302 					goto send_n_return;
1303 			}
1304 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1305 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1306 				/* alloc should be from the default buffer pool
1307 				 * attached to this interface
1308 				 */
1309 				bpid = priv->bp_list->buf_pool.bpid;
1310 
1311 				if (unlikely((*bufs)->nb_segs > 1)) {
1312 					DPAA2_PMD_ERR("S/G support not added"
1313 						" for non hw offload buffer");
1314 					goto send_n_return;
1315 				}
1316 				if (eth_copy_mbuf_to_fd(*bufs,
1317 							&fd_arr[loop], bpid)) {
1318 					goto send_n_return;
1319 				}
1320 				/* free the original packet */
1321 				rte_pktmbuf_free(*bufs);
1322 			} else {
1323 				bpid = mempool_to_bpid(mp);
1324 				if (unlikely((*bufs)->nb_segs > 1)) {
1325 					if (eth_mbuf_to_sg_fd(*bufs,
1326 							&fd_arr[loop],
1327 							mp, bpid))
1328 						goto send_n_return;
1329 				} else {
1330 					eth_mbuf_to_fd(*bufs,
1331 						       &fd_arr[loop], bpid);
1332 				}
1333 			}
1334 #ifdef RTE_LIBRTE_IEEE1588
1335 			enable_tx_tstamp(&fd_arr[loop]);
1336 #endif
1337 			bufs++;
1338 		}
1339 
1340 		loop = 0;
1341 		retry_count = 0;
1342 		while (loop < frames_to_send) {
1343 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1344 					&fd_arr[loop], &flags[loop],
1345 					frames_to_send - loop);
1346 			if (unlikely(ret < 0)) {
1347 				retry_count++;
1348 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1349 					num_tx += loop;
1350 					nb_pkts -= loop;
1351 					goto send_n_return;
1352 				}
1353 			} else {
1354 				loop += ret;
1355 				retry_count = 0;
1356 			}
1357 		}
1358 
1359 		num_tx += loop;
1360 		nb_pkts -= loop;
1361 	}
1362 	dpaa2_q->tx_pkts += num_tx;
1363 
1364 	loop = 0;
1365 	while (loop < num_tx) {
1366 		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1367 			rte_pktmbuf_free(*orig_bufs);
1368 		orig_bufs++;
1369 		loop++;
1370 	}
1371 
1372 	return num_tx;
1373 
1374 send_n_return:
1375 	/* send any already prepared fd */
1376 	if (loop) {
1377 		unsigned int i = 0;
1378 
1379 		retry_count = 0;
1380 		while (i < loop) {
1381 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1382 							 &fd_arr[i],
1383 							 &flags[i],
1384 							 loop - i);
1385 			if (unlikely(ret < 0)) {
1386 				retry_count++;
1387 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1388 					break;
1389 			} else {
1390 				i += ret;
1391 				retry_count = 0;
1392 			}
1393 		}
1394 		num_tx += i;
1395 	}
1396 skip_tx:
1397 	dpaa2_q->tx_pkts += num_tx;
1398 
1399 	loop = 0;
1400 	while (loop < num_tx) {
1401 		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1402 			rte_pktmbuf_free(*orig_bufs);
1403 		orig_bufs++;
1404 		loop++;
1405 	}
1406 
1407 	return num_tx;
1408 }
1409 
1410 void
1411 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1412 {
1413 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1414 	struct qbman_fd *fd;
1415 	struct rte_mbuf *m;
1416 
1417 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1418 
1419 	/* Setting port id does not matter as we are going to free the mbuf */
1420 	m = eth_fd_to_mbuf(fd, 0);
1421 	rte_pktmbuf_free(m);
1422 }
1423 
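/* Fill an enqueue descriptor for an ordered or atomic context based on
 * the sequence number stored in the mbuf: ORP restoration (optionally
 * with an enqueue response when loose ordering is disabled) for ordered
 * frames, DCA consumption of the held DQRR entry for atomic frames.
 */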
1424 static void
1425 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1426 			     struct rte_mbuf *m,
1427 			     struct qbman_eq_desc *eqdesc)
1428 {
1429 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1430 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1431 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1432 	struct eqresp_metadata *eqresp_meta;
1433 	uint16_t orpid, seqnum;
1434 	uint8_t dq_idx;
1435 
1436 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1437 
1438 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1439 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1440 			DPAA2_EQCR_OPRID_SHIFT;
1441 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1442 			DPAA2_EQCR_SEQNUM_SHIFT;
1443 
1444 		if (!priv->en_loose_ordered) {
1445 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1446 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1447 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1448 				dpio_dev->eqresp_pi]), 1);
1449 			qbman_eq_desc_set_token(eqdesc, 1);
1450 
1451 			eqresp_meta = &dpio_dev->eqresp_meta[
1452 				dpio_dev->eqresp_pi];
1453 			eqresp_meta->dpaa2_q = dpaa2_q;
1454 			eqresp_meta->mp = m->pool;
1455 
1456 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1457 				dpio_dev->eqresp_pi++ :
1458 				(dpio_dev->eqresp_pi = 0);
1459 		} else {
1460 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1461 		}
1462 	} else {
1463 		dq_idx = *dpaa2_seqn(m) - 1;
1464 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1465 		DPAA2_PER_LCORE_DQRR_SIZE--;
1466 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1467 	}
1468 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1469 }
1470 
1471 uint16_t
1472 dpaa2_dev_tx_multi_txq_ordered(void **queue,
1473 		struct rte_mbuf **bufs, uint16_t nb_pkts)
1474 {
1475 	/* Function to transmit the frames to multiple queues, respectively. */
1476 	uint32_t loop, retry_count;
1477 	int32_t ret;
1478 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1479 	uint32_t frames_to_send;
1480 	struct rte_mempool *mp;
1481 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1482 	struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
1483 	struct qbman_swp *swp;
1484 	uint16_t bpid;
1485 	struct rte_mbuf *mi;
1486 	struct rte_eth_dev_data *eth_data;
1487 	struct dpaa2_dev_priv *priv;
1488 	struct dpaa2_queue *order_sendq;
1489 
1490 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1491 		ret = dpaa2_affine_qbman_swp();
1492 		if (ret) {
1493 			DPAA2_PMD_ERR(
1494 				"Failed to allocate IO portal, tid: %d\n",
1495 				rte_gettid());
1496 			return 0;
1497 		}
1498 	}
1499 	swp = DPAA2_PER_LCORE_PORTAL;
1500 
1501 	for (loop = 0; loop < nb_pkts; loop++) {
1502 		dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
1503 		eth_data = dpaa2_q[loop]->eth_data;
1504 		priv = eth_data->dev_private;
1505 		qbman_eq_desc_clear(&eqdesc[loop]);
1506 		if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
1507 			order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1508 			dpaa2_set_enqueue_descriptor(order_sendq,
1509 							     (*bufs),
1510 							     &eqdesc[loop]);
1511 		} else {
1512 			qbman_eq_desc_set_no_orp(&eqdesc[loop],
1513 							 DPAA2_EQ_RESP_ERR_FQ);
1514 			qbman_eq_desc_set_fq(&eqdesc[loop],
1515 						     dpaa2_q[loop]->fqid);
1516 		}
1517 
1518 		retry_count = 0;
1519 		while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
1520 			retry_count++;
1521 			/* Retry for some time before giving up */
1522 			if (retry_count > CONG_RETRY_COUNT)
1523 				goto send_frames;
1524 		}
1525 
1526 		if (likely(RTE_MBUF_DIRECT(*bufs))) {
1527 			mp = (*bufs)->pool;
1528 			/* Check the basic scenario and set
1529 			 * the FD appropriately here itself.
1530 			 */
1531 			if (likely(mp && mp->ops_index ==
1532 				priv->bp_list->dpaa2_ops_index &&
1533 				(*bufs)->nb_segs == 1 &&
1534 				rte_mbuf_refcnt_read((*bufs)) == 1)) {
1535 				if (unlikely((*bufs)->ol_flags
1536 					& RTE_MBUF_F_TX_VLAN)) {
1537 					ret = rte_vlan_insert(bufs);
1538 					if (ret)
1539 						goto send_frames;
1540 				}
1541 				DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1542 					&fd_arr[loop],
1543 					mempool_to_bpid(mp));
1544 				bufs++;
1545 				dpaa2_q[loop]++;
1546 				continue;
1547 			}
1548 		} else {
1549 			mi = rte_mbuf_from_indirect(*bufs);
1550 			mp = mi->pool;
1551 		}
1552 		/* Not a hw_pkt pool allocated frame */
1553 		if (unlikely(!mp || !priv->bp_list)) {
1554 			DPAA2_PMD_ERR("Err: No buffer pool attached");
1555 			goto send_frames;
1556 		}
1557 
1558 		if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1559 			DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1560 			/* alloc should be from the default buffer pool
1561 			 * attached to this interface
1562 			 */
1563 			bpid = priv->bp_list->buf_pool.bpid;
1564 
1565 			if (unlikely((*bufs)->nb_segs > 1)) {
1566 				DPAA2_PMD_ERR(
1567 					"S/G not supp for non hw offload buffer");
1568 				goto send_frames;
1569 			}
1570 			if (eth_copy_mbuf_to_fd(*bufs,
1571 						&fd_arr[loop], bpid)) {
1572 				goto send_frames;
1573 			}
1574 			/* free the original packet */
1575 			rte_pktmbuf_free(*bufs);
1576 		} else {
1577 			bpid = mempool_to_bpid(mp);
1578 			if (unlikely((*bufs)->nb_segs > 1)) {
1579 				if (eth_mbuf_to_sg_fd(*bufs,
1580 						      &fd_arr[loop],
1581 						      mp,
1582 						      bpid))
1583 					goto send_frames;
1584 			} else {
1585 				eth_mbuf_to_fd(*bufs,
1586 					       &fd_arr[loop], bpid);
1587 			}
1588 		}
1589 
1590 		bufs++;
1591 		dpaa2_q[loop]++;
1592 	}
1593 
1594 send_frames:
1595 	frames_to_send = loop;
1596 	loop = 0;
1597 	while (loop < frames_to_send) {
1598 		ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
1599 				&fd_arr[loop],
1600 				frames_to_send - loop);
1601 		if (likely(ret > 0)) {
1602 			loop += ret;
1603 		} else {
1604 			retry_count++;
1605 			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1606 				break;
1607 		}
1608 	}
1609 
1610 	return loop;
1611 }
1612 
1613 /* Callback to handle sending ordered packets through WRIOP based interface */
1614 uint16_t
1615 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1616 {
1617 	/* Function to transmit the frames to given device and VQ*/
1618 	/* Function to transmit the frames to the given device and VQ */
1619 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1620 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1621 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1622 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1623 	struct rte_mbuf *mi;
1624 	struct rte_mempool *mp;
1625 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1626 	struct qbman_swp *swp;
1627 	uint32_t frames_to_send, num_free_eq_desc;
1628 	uint32_t loop, retry_count;
1629 	int32_t ret;
1630 	uint16_t num_tx = 0;
1631 	uint16_t bpid;
1632 
1633 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1634 		ret = dpaa2_affine_qbman_swp();
1635 		if (ret) {
1636 			DPAA2_PMD_ERR(
1637 				"Failed to allocate IO portal, tid: %d\n",
1638 				rte_gettid());
1639 			return 0;
1640 		}
1641 	}
1642 	swp = DPAA2_PER_LCORE_PORTAL;
1643 
1644 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1645 			   eth_data, dpaa2_q->fqid);
1646 
1647 	/* This would also handle normal and atomic queues as any type
1648 	 * of packet can be enqueued when ordered queues are being used.
1649 	 */
1650 	while (nb_pkts) {
1651 		/*Check if the queue is congested*/
1652 		retry_count = 0;
1653 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1654 			retry_count++;
1655 			/* Retry for some time before giving up */
1656 			if (retry_count > CONG_RETRY_COUNT)
1657 				goto skip_tx;
1658 		}
1659 
1660 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1661 			dpaa2_eqcr_size : nb_pkts;
1662 
1663 		if (!priv->en_loose_ordered) {
1664 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1665 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1666 				if (num_free_eq_desc < frames_to_send)
1667 					frames_to_send = num_free_eq_desc;
1668 			}
1669 		}
1670 
1671 		for (loop = 0; loop < frames_to_send; loop++) {
1672 			/*Prepare enqueue descriptor*/
1673 			qbman_eq_desc_clear(&eqdesc[loop]);
1674 
1675 			if (*dpaa2_seqn(*bufs)) {
1676 				/* Use only queue 0 for Tx in case of atomic/
1677 				 * ordered packets as packets can get unordered
1678 				 * when being transmitted out from the interface
1679 				 */
1680 				dpaa2_set_enqueue_descriptor(order_sendq,
1681 							     (*bufs),
1682 							     &eqdesc[loop]);
1683 			} else {
1684 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1685 							 DPAA2_EQ_RESP_ERR_FQ);
1686 				qbman_eq_desc_set_fq(&eqdesc[loop],
1687 						     dpaa2_q->fqid);
1688 			}
1689 
1690 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1691 				mp = (*bufs)->pool;
1692 				/* Check the basic scenario and set
1693 				 * the FD appropriately here itself.
1694 				 */
1695 				if (likely(mp && mp->ops_index ==
1696 				    priv->bp_list->dpaa2_ops_index &&
1697 				    (*bufs)->nb_segs == 1 &&
1698 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1699 					if (unlikely((*bufs)->ol_flags
1700 						& RTE_MBUF_F_TX_VLAN)) {
1701 					  ret = rte_vlan_insert(bufs);
1702 					  if (ret)
1703 						goto send_n_return;
1704 					}
1705 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1706 						&fd_arr[loop],
1707 						mempool_to_bpid(mp));
1708 					bufs++;
1709 					continue;
1710 				}
1711 			} else {
1712 				mi = rte_mbuf_from_indirect(*bufs);
1713 				mp = mi->pool;
1714 			}
1715 			/* Not a hw_pkt pool allocated frame */
1716 			if (unlikely(!mp || !priv->bp_list)) {
1717 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1718 				goto send_n_return;
1719 			}
1720 
1721 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1722 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1723 				/* alloc should be from the default buffer pool
1724 				 * attached to this interface
1725 				 */
1726 				bpid = priv->bp_list->buf_pool.bpid;
1727 
1728 				if (unlikely((*bufs)->nb_segs > 1)) {
1729 					DPAA2_PMD_ERR(
1730 						"S/G not supp for non hw offload buffer");
1731 					goto send_n_return;
1732 				}
1733 				if (eth_copy_mbuf_to_fd(*bufs,
1734 							&fd_arr[loop], bpid)) {
1735 					goto send_n_return;
1736 				}
1737 				/* free the original packet */
1738 				rte_pktmbuf_free(*bufs);
1739 			} else {
1740 				bpid = mempool_to_bpid(mp);
1741 				if (unlikely((*bufs)->nb_segs > 1)) {
1742 					if (eth_mbuf_to_sg_fd(*bufs,
1743 							      &fd_arr[loop],
1744 							      mp,
1745 							      bpid))
1746 						goto send_n_return;
1747 				} else {
1748 					eth_mbuf_to_fd(*bufs,
1749 						       &fd_arr[loop], bpid);
1750 				}
1751 			}
1752 			bufs++;
1753 		}
1754 
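		/* Push the prepared FDs into the enqueue ring. The call may
		 * accept fewer frames than requested or fail while the ring
		 * is busy, so retry the remainder up to
		 * DPAA2_MAX_TX_RETRY_COUNT.
		 */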
1755 		loop = 0;
1756 		retry_count = 0;
1757 		while (loop < frames_to_send) {
1758 			ret = qbman_swp_enqueue_multiple_desc(swp,
1759 					&eqdesc[loop], &fd_arr[loop],
1760 					frames_to_send - loop);
1761 			if (unlikely(ret < 0)) {
1762 				retry_count++;
1763 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1764 					num_tx += loop;
1765 					nb_pkts -= loop;
1766 					goto send_n_return;
1767 				}
1768 			} else {
1769 				loop += ret;
1770 				retry_count = 0;
1771 			}
1772 		}
1773 
1774 		num_tx += loop;
1775 		nb_pkts -= loop;
1776 	}
1777 	dpaa2_q->tx_pkts += num_tx;
1778 	return num_tx;
1779 
1780 send_n_return:
1781 	/* Send any FDs that were already prepared */
1782 	if (loop) {
1783 		unsigned int i = 0;
1784 
1785 		retry_count = 0;
1786 		while (i < loop) {
1787 			ret = qbman_swp_enqueue_multiple_desc(swp,
1788 				       &eqdesc[i], &fd_arr[i], loop - i);
1789 			if (unlikely(ret < 0)) {
1790 				retry_count++;
1791 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1792 					break;
1793 			} else {
1794 				i += ret;
1795 				retry_count = 0;
1796 			}
1797 		}
1798 		num_tx += i;
1799 	}
1800 skip_tx:
1801 	dpaa2_q->tx_pkts += num_tx;
1802 	return num_tx;
1803 }
1804 
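/*
 * Illustrative sketch (not part of the driver): applications reach the
 * Tx routines above through rte_eth_tx_burst() and typically retry any
 * frames the PMD could not enqueue in one call. The identifiers
 * port_id, queue_id, pkts and n below are placeholders.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < n)
 *		sent += rte_eth_tx_burst(port_id, queue_id,
 *					 &pkts[sent], n - sent);
 */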
1805 #if defined(RTE_TOOLCHAIN_GCC)
1806 #pragma GCC diagnostic push
1807 #pragma GCC diagnostic ignored "-Wcast-qual"
1808 #elif defined(RTE_TOOLCHAIN_CLANG)
1809 #pragma clang diagnostic push
1810 #pragma clang diagnostic ignored "-Wcast-qual"
1811 #endif
1812 
1813 /* This function loops back all the received packets. */
1814 uint16_t
1815 dpaa2_dev_loopback_rx(void *queue,
1816 		      struct rte_mbuf **bufs __rte_unused,
1817 		      uint16_t nb_pkts)
1818 {
1819 	/* Function receives frames for a given device and VQ */
1820 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1821 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1822 	uint32_t fqid = dpaa2_q->fqid;
1823 	int ret, num_rx = 0, num_tx = 0, pull_size;
1824 	uint8_t pending, status;
1825 	struct qbman_swp *swp;
1826 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1827 	struct qbman_pull_desc pulldesc;
1828 	struct qbman_eq_desc eqdesc;
1829 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1830 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1831 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1832 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1833 	/* TODO: currently only the 1st Tx queue is used for loopback */
1834 
1835 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1836 		ret = dpaa2_affine_qbman_ethrx_swp();
1837 		if (ret) {
1838 			DPAA2_PMD_ERR("Failure in affining portal");
1839 			return 0;
1840 		}
1841 	}
1842 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1843 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
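	/* Rx uses two dq_storage buffers per queue: while frames from the
	 * previously issued pull are processed, a new volatile dequeue is
	 * issued into the other buffer, so the first call here only primes
	 * that pipeline.
	 */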
1844 	if (unlikely(!q_storage->active_dqs)) {
1845 		q_storage->toggle = 0;
1846 		dq_storage = q_storage->dq_storage[q_storage->toggle];
1847 		q_storage->last_num_pkts = pull_size;
1848 		qbman_pull_desc_clear(&pulldesc);
1849 		qbman_pull_desc_set_numframes(&pulldesc,
1850 					      q_storage->last_num_pkts);
1851 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1852 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1853 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1854 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1855 			while (!qbman_check_command_complete(
1856 			       get_swp_active_dqs(
1857 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1858 				;
1859 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1860 		}
1861 		while (1) {
1862 			if (qbman_swp_pull(swp, &pulldesc)) {
1863 				DPAA2_PMD_DP_DEBUG(
1864 					"VDQ command not issued.QBMAN busy\n");
1865 				/* Portal was busy, try again */
1866 				continue;
1867 			}
1868 			break;
1869 		}
1870 		q_storage->active_dqs = dq_storage;
1871 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1872 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1873 				   dq_storage);
1874 	}
1875 
1876 	dq_storage = q_storage->active_dqs;
1877 	rte_prefetch0((void *)(size_t)(dq_storage));
1878 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
1879 
1880 	/* Prepare the next pull descriptor. This gives room for the
1881 	 * prefetching done on the DQRR entries
1882 	 */
1883 	q_storage->toggle ^= 1;
1884 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1885 	qbman_pull_desc_clear(&pulldesc);
1886 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1887 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1888 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1889 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1890 
1891 	/* Prepare the enqueue descriptor */
1892 	qbman_eq_desc_clear(&eqdesc);
1893 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1894 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1895 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1896 
1897 	/* Check if the previously issued command has completed.
1898 	 * The SWP also appears to be shared between the Ethernet
1899 	 * driver and the SEC driver.
1900 	 */
1901 	while (!qbman_check_command_complete(dq_storage))
1902 		;
1903 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1904 		clear_swp_active_dqs(q_storage->active_dpio_id);
1905 
1906 	pending = 1;
1907 
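	/* Walk the dequeue entries as QBMAN delivers them, collecting the
	 * frame descriptors until the pull command reports completion.
	 */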
1908 	do {
1909 		/* Loop until dq_storage is updated with a
1910 		 * new token by QBMAN
1911 		 */
1912 		while (!qbman_check_new_result(dq_storage))
1913 			;
1914 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1915 		/* Check whether the last pull command has completed and
1916 		 * set the condition for loop termination
1917 		 */
1918 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1919 			pending = 0;
1920 			/* Check for valid frame. */
1921 			status = qbman_result_DQ_flags(dq_storage);
1922 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1923 				continue;
1924 		}
1925 		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1926 
1927 		dq_storage++;
1928 		num_rx++;
1929 	} while (pending);
1930 
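	/* Loop back: enqueue the collected frame descriptors unchanged to
	 * the first Tx queue, retrying until all of them are accepted.
	 */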
1931 	while (num_tx < num_rx) {
1932 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1933 				&fd[num_tx], 0, num_rx - num_tx);
1934 	}
1935 
1936 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1937 		while (!qbman_check_command_complete(
1938 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1939 			;
1940 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1941 	}
1942 	/* Issue a volatile dequeue command for the next pull */
1943 	while (1) {
1944 		if (qbman_swp_pull(swp, &pulldesc)) {
1945 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1946 					  "QBMAN is busy (2)\n");
1947 			continue;
1948 		}
1949 		break;
1950 	}
1951 	q_storage->active_dqs = dq_storage1;
1952 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1953 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1954 
1955 	dpaa2_q->rx_pkts += num_rx;
1956 	dpaa2_q->tx_pkts += num_tx;
1957 
1958 	return 0;
1959 }
1960 #if defined(RTE_TOOLCHAIN_GCC)
1961 #pragma GCC diagnostic pop
1962 #elif defined(RTE_TOOLCHAIN_CLANG)
1963 #pragma clang diagnostic pop
1964 #endif
1965