xref: /dpdk/drivers/net/dpaa2/dpaa2_rxtx.c (revision 43fd3624fdfe3a33904a9b64d94306dd3d4f2c13)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2024 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <dev_driver.h>
17 #include <rte_hexdump.h>
18 
19 #include <bus_fslmc_driver.h>
20 #include <fslmc_vfio.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_dpio.h>
23 #include <dpaa2_hw_mempool.h>
24 
25 #include "dpaa2_pmd_logs.h"
26 #include "dpaa2_ethdev.h"
27 #include "base/dpaa2_hw_dpni_annot.h"
28 #include "dpaa2_parse_dump.h"
29 
30 static inline uint32_t __rte_hot
31 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
32 			struct dpaa2_annot_hdr *annotation);
33 
34 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
35 
36 static inline rte_mbuf_timestamp_t *
37 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
38 {
39 	return RTE_MBUF_DYNFIELD(mbuf,
40 		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
41 }
42 
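/* Populate a contiguous (single buffer) FD from an mbuf: the buffer address,
 * data length, buffer pool id and data offset are taken from the mbuf, while
 * the frame context (FRC), CTRL and FLC fields are reset.
 */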
43 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
44 	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
45 	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
46 	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
47 	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
48 	DPAA2_SET_FD_FRC(_fd, 0);		\
49 	DPAA2_RESET_FD_CTRL(_fd);		\
50 	DPAA2_RESET_FD_FLC(_fd);		\
51 } while (0)
52 
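/* Fast Rx parsing when the parse summary is carried in the FRC field of the
 * FD (LX2160A): known FRC codes map directly to mbuf packet types and any
 * other value falls back to the annotation based slow parser.
 */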
53 static inline void __rte_hot
54 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
55 		       void *hw_annot_addr)
56 {
57 	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
58 	struct dpaa2_annot_hdr *annotation =
59 			(struct dpaa2_annot_hdr *)hw_annot_addr;
60 
61 	if (unlikely(dpaa2_print_parser_result))
62 		dpaa2_print_parse_result(annotation);
63 
64 	m->packet_type = RTE_PTYPE_UNKNOWN;
65 	switch (frc) {
66 	case DPAA2_PKT_TYPE_ETHER:
67 		m->packet_type = RTE_PTYPE_L2_ETHER;
68 		break;
69 	case DPAA2_PKT_TYPE_IPV4:
70 		m->packet_type = RTE_PTYPE_L2_ETHER |
71 			RTE_PTYPE_L3_IPV4;
72 		break;
73 	case DPAA2_PKT_TYPE_IPV6:
74 		m->packet_type = RTE_PTYPE_L2_ETHER |
75 			RTE_PTYPE_L3_IPV6;
76 		break;
77 	case DPAA2_PKT_TYPE_IPV4_EXT:
78 		m->packet_type = RTE_PTYPE_L2_ETHER |
79 			RTE_PTYPE_L3_IPV4_EXT;
80 		break;
81 	case DPAA2_PKT_TYPE_IPV6_EXT:
82 		m->packet_type = RTE_PTYPE_L2_ETHER |
83 			RTE_PTYPE_L3_IPV6_EXT;
84 		break;
85 	case DPAA2_PKT_TYPE_IPV4_TCP:
86 		m->packet_type = RTE_PTYPE_L2_ETHER |
87 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
88 		break;
89 	case DPAA2_PKT_TYPE_IPV6_TCP:
90 		m->packet_type = RTE_PTYPE_L2_ETHER |
91 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
92 		break;
93 	case DPAA2_PKT_TYPE_IPV4_UDP:
94 		m->packet_type = RTE_PTYPE_L2_ETHER |
95 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
96 		break;
97 	case DPAA2_PKT_TYPE_IPV6_UDP:
98 		m->packet_type = RTE_PTYPE_L2_ETHER |
99 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
100 		break;
101 	case DPAA2_PKT_TYPE_IPV4_SCTP:
102 		m->packet_type = RTE_PTYPE_L2_ETHER |
103 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
104 		break;
105 	case DPAA2_PKT_TYPE_IPV6_SCTP:
106 		m->packet_type = RTE_PTYPE_L2_ETHER |
107 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
108 		break;
109 	case DPAA2_PKT_TYPE_IPV4_ICMP:
110 		m->packet_type = RTE_PTYPE_L2_ETHER |
111 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
112 		break;
113 	case DPAA2_PKT_TYPE_IPV6_ICMP:
114 		m->packet_type = RTE_PTYPE_L2_ETHER |
115 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
116 		break;
117 	default:
118 		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
119 	}
120 	m->hash.rss = fd->simple.flc_hi;
121 	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
122 
123 	if (dpaa2_enable_ts[m->port]) {
124 		*dpaa2_timestamp_dynfield(m) = annotation->word2;
125 		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
126 		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
127 				*dpaa2_timestamp_dynfield(m));
128 	}
129 
130 	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
131 		"ol_flags =0x%" PRIx64 "",
132 		frc, m->packet_type, m->ol_flags);
133 }
134 
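/* Slow path Rx parsing: derive the packet type and Rx offload flags
 * (VLAN/QinQ, checksum status, fragmentation, L3/L4 protocol) from the
 * hardware annotation words.
 */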
135 static inline uint32_t __rte_hot
136 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
137 			struct dpaa2_annot_hdr *annotation)
138 {
139 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
140 	uint16_t *vlan_tci;
141 
142 	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
143 			"(4)=0x%" PRIx64 "\t",
144 			annotation->word3, annotation->word4);
145 
146 #if defined(RTE_LIBRTE_IEEE1588)
147 	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
148 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
149 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
150 	}
151 #endif
152 
153 	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
154 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
155 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
156 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
157 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
158 		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
159 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
160 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
161 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
162 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
163 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
164 		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
165 	}
166 
167 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
168 		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
169 		goto parse_done;
170 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
171 		pkt_type |= RTE_PTYPE_L2_ETHER;
172 	} else {
173 		goto parse_done;
174 	}
175 
176 	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
177 				L2_MPLS_N_PRESENT))
178 		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
179 
180 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
181 			     L3_IPV4_N_PRESENT)) {
182 		pkt_type |= RTE_PTYPE_L3_IPV4;
183 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
184 			L3_IP_N_OPT_PRESENT))
185 			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
186 		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
187 					L3_PROTO_ESP_PRESENT))
188 			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
189 
190 	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
191 		  L3_IPV6_N_PRESENT)) {
192 		pkt_type |= RTE_PTYPE_L3_IPV6;
193 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
194 		    L3_IP_N_OPT_PRESENT))
195 			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
196 		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
197 					L3_PROTO_ESP_PRESENT))
198 			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
199 	} else {
200 		goto parse_done;
201 	}
202 
203 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
204 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
205 	else
206 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
207 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
208 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
209 	else
210 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
211 
212 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
213 	    L3_IP_1_MORE_FRAGMENT |
214 	    L3_IP_N_FIRST_FRAGMENT |
215 	    L3_IP_N_MORE_FRAGMENT)) {
216 		pkt_type |= RTE_PTYPE_L4_FRAG;
217 		goto parse_done;
218 	} else {
219 		pkt_type |= RTE_PTYPE_L4_NONFRAG;
220 	}
221 
222 	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
223 		pkt_type |= RTE_PTYPE_L4_UDP;
224 
225 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
226 		pkt_type |= RTE_PTYPE_L4_TCP;
227 
228 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
229 		pkt_type |= RTE_PTYPE_L4_SCTP;
230 
231 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
232 		pkt_type |= RTE_PTYPE_L4_ICMP;
233 
234 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
235 		pkt_type |= RTE_PTYPE_UNKNOWN;
236 
237 parse_done:
238 	return pkt_type;
239 }
240 
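/* Default Rx parsing for platforms without parse results in the FRC:
 * checksum flags and timestamp come from the annotation, common L3/L4
 * combinations are matched directly on annotation word4 and everything
 * else is handed to the slow parser.
 */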
241 static inline uint32_t __rte_hot
242 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
243 {
244 	struct dpaa2_annot_hdr *annotation =
245 			(struct dpaa2_annot_hdr *)hw_annot_addr;
246 
247 	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
248 			   annotation->word4);
249 
250 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
251 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
252 	else
253 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
254 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
255 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
256 	else
257 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
258 
259 	if (unlikely(dpaa2_print_parser_result))
260 		dpaa2_print_parse_result(annotation);
261 
262 	if (dpaa2_enable_ts[mbuf->port]) {
263 		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
264 		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
265 		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
266 				*dpaa2_timestamp_dynfield(mbuf));
267 	}
268 
269 	/* Check detailed parsing requirement */
270 	if (annotation->word3 & 0x7FFFFC3FFFF)
271 		return dpaa2_dev_rx_parse_slow(mbuf, annotation);
272 
273 	/* Return some common types from parse processing */
274 	switch (annotation->word4) {
275 	case DPAA2_L3_IPv4:
276 		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
277 	case DPAA2_L3_IPv6:
278 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
279 	case DPAA2_L3_IPv4_TCP:
280 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
281 				RTE_PTYPE_L4_TCP;
282 	case DPAA2_L3_IPv4_UDP:
283 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
284 				RTE_PTYPE_L4_UDP;
285 	case DPAA2_L3_IPv6_TCP:
286 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
287 				RTE_PTYPE_L4_TCP;
288 	case DPAA2_L3_IPv6_UDP:
289 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
290 				RTE_PTYPE_L4_UDP;
291 	default:
292 		break;
293 	}
294 
295 	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
296 }
297 
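/* Convert a scatter/gather FD into a chained mbuf: the first SG entry
 * becomes the head segment, the remaining entries are linked as further
 * segments and the buffer holding the SG table itself is released.
 */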
298 static inline struct rte_mbuf *__rte_hot
299 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
300 		  int port_id)
301 {
302 	struct qbman_sge *sgt, *sge;
303 	size_t sg_addr, fd_addr;
304 	int i = 0;
305 	void *hw_annot_addr;
306 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
307 
308 	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
309 	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
310 
311 	/* Get Scatter gather table address */
312 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
313 
314 	sge = &sgt[i++];
315 	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
316 
317 	/* First Scatter gather entry */
318 	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
319 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
320 	/* Prepare all the metadata for first segment */
321 	first_seg->buf_addr = (uint8_t *)sg_addr;
322 	first_seg->ol_flags = 0;
323 	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
324 	first_seg->data_len = sge->length  & 0x1FFFF;
325 	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
326 	first_seg->nb_segs = 1;
327 	first_seg->next = NULL;
328 	first_seg->port = port_id;
329 	if (dpaa2_svr_family == SVR_LX2160A)
330 		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
331 	else
332 		first_seg->packet_type =
333 			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
334 
335 	rte_mbuf_refcnt_set(first_seg, 1);
336 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
337 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
338 			(void **)&first_seg, 1, 1);
339 #endif
340 	cur_seg = first_seg;
341 	while (!DPAA2_SG_IS_FINAL(sge)) {
342 		sge = &sgt[i++];
343 		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
344 				DPAA2_GET_FLE_ADDR(sge));
345 		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
346 			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
347 		next_seg->buf_addr  = (uint8_t *)sg_addr;
348 		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
349 		next_seg->data_len  = sge->length  & 0x1FFFF;
350 		first_seg->nb_segs += 1;
351 		rte_mbuf_refcnt_set(next_seg, 1);
352 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
353 		rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
354 				(void **)&next_seg, 1, 1);
355 #endif
356 		cur_seg->next = next_seg;
357 		next_seg->next = NULL;
358 		cur_seg = next_seg;
359 	}
360 	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
361 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
362 	rte_mbuf_refcnt_set(temp, 1);
363 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
364 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
365 			(void **)&temp, 1, 1);
366 #endif
367 	rte_pktmbuf_free_seg(temp);
368 
369 	return (void *)first_seg;
370 }
371 
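/* Convert a contiguous FD into a single segment mbuf and run the platform
 * specific Rx parsing on its annotation.
 */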
372 static inline struct rte_mbuf *__rte_hot
373 eth_fd_to_mbuf(const struct qbman_fd *fd,
374 	       int port_id)
375 {
376 	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
377 	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
378 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
379 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
380 
381 	/* Need to repopulate some of the fields,
382 	 * as they may have changed during the last transmission.
383 	 */
384 	mbuf->nb_segs = 1;
385 	mbuf->ol_flags = 0;
386 	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
387 	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
388 	mbuf->pkt_len = mbuf->data_len;
389 	mbuf->port = port_id;
390 	mbuf->next = NULL;
391 	mbuf->hash.sched.color = DPAA2_GET_FD_DROPP(fd);
392 	rte_mbuf_refcnt_set(mbuf, 1);
393 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
394 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
395 			(void **)&mbuf, 1, 1);
396 #endif
397 
398 	/* Parse the packet */
399 	/* Parse results for LX2 are in the FRC field of the FD.
400 	 * For other DPAA2 platforms, the parse results follow
401 	 * the private SW annotation area.
402 	 */
403 
404 	if (dpaa2_svr_family == SVR_LX2160A)
405 		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
406 	else
407 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
408 
409 	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
410 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
411 		mbuf, mbuf->buf_addr, mbuf->data_off,
412 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
413 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
414 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
415 
416 	return mbuf;
417 }
418 
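/* Build a scatter/gather FD from a (possibly multi segment) mbuf. The SG
 * table is placed in the headroom of the first buffer when there is enough
 * room, otherwise a buffer is taken from dpaa2_tx_sg_pool. Segments that
 * must not be freed by hardware are marked with an invalid bpid; external
 * and indirect segments are also recorded in free_buf so that software can
 * free them after transmission.
 */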
419 static int __rte_noinline __rte_hot
420 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
421 		  struct qbman_fd *fd,
422 		  struct sw_buf_free *free_buf,
423 		  uint32_t *free_count,
424 		  uint32_t pkt_id,
425 		  uint16_t bpid)
426 {
427 	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
428 	struct qbman_sge *sgt, *sge = NULL;
429 	int i, offset = 0;
430 
431 #ifdef RTE_LIBRTE_IEEE1588
432 	/* annotation area for timestamp in first buffer */
433 	offset = 0x64;
434 #endif
435 	if (RTE_MBUF_DIRECT(mbuf) &&
436 		(mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
437 		+ offset))) {
438 		temp = mbuf;
439 		if (rte_mbuf_refcnt_read(temp) > 1) {
440 			/* If refcnt > 1, invalid bpid is set to ensure
441 			 * buffer is not freed by HW
442 			 */
443 			fd->simple.bpid_offset = 0;
444 			DPAA2_SET_FD_IVP(fd);
445 			rte_mbuf_refcnt_update(temp, -1);
446 		} else {
447 			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
448 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
449 			rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
450 					(void **)&temp, 1, 0);
451 #endif
452 		}
453 		DPAA2_SET_FD_OFFSET(fd, offset);
454 	} else {
455 		temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
456 		if (temp == NULL) {
457 			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table");
458 			return -ENOMEM;
459 		}
460 		DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
461 		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
462 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
463 		rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
464 			(void **)&temp, 1, 0);
465 #endif
466 	}
467 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
468 	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
469 	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
470 	DPAA2_RESET_FD_FRC(fd);
471 	DPAA2_RESET_FD_CTRL(fd);
472 	DPAA2_RESET_FD_FLC(fd);
473 	/*Set Scatter gather table and Scatter gather entries*/
474 	sgt = (struct qbman_sge *)(
475 			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
476 			+ DPAA2_GET_FD_OFFSET(fd));
477 
478 	for (i = 0; i < mbuf->nb_segs; i++) {
479 		sge = &sgt[i];
480 		/*Resetting the buffer pool id and offset field*/
481 		sge->fin_bpid_offset = 0;
482 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(cur_seg));
483 		sge->length = cur_seg->data_len;
484 		if (RTE_MBUF_DIRECT(cur_seg)) {
485 			/* If we are using an inline SGT in the same buffer,
486 			 * set the FLE FMT as Frame Data Section.
487 			 */
488 			if (temp == cur_seg) {
489 				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
490 				DPAA2_SET_FLE_IVP(sge);
491 			} else {
492 				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
493 				/* If refcnt > 1, invalid bpid is set to ensure
494 				 * buffer is not freed by HW
495 				 */
496 					DPAA2_SET_FLE_IVP(sge);
497 					rte_mbuf_refcnt_update(cur_seg, -1);
498 				} else {
499 					DPAA2_SET_FLE_BPID(sge,
500 						mempool_to_bpid(cur_seg->pool));
501 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
502 				rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
503 					(void **)&cur_seg, 1, 0);
504 #endif
505 				}
506 			}
507 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
508 			free_buf[*free_count].seg = cur_seg;
509 			free_buf[*free_count].pkt_id = pkt_id;
510 			++*free_count;
511 			DPAA2_SET_FLE_IVP(sge);
512 		} else {
513 			/* Get owner MBUF from indirect buffer */
514 			mi = rte_mbuf_from_indirect(cur_seg);
515 			if (rte_mbuf_refcnt_read(mi) > 1) {
516 				/* If refcnt > 1, invalid bpid is set to ensure
517 				 * owner buffer is not freed by HW
518 				 */
519 				DPAA2_SET_FLE_IVP(sge);
520 			} else {
521 				DPAA2_SET_FLE_BPID(sge,
522 						   mempool_to_bpid(mi->pool));
523 				rte_mbuf_refcnt_update(mi, 1);
524 			}
525 			free_buf[*free_count].seg = cur_seg;
526 			free_buf[*free_count].pkt_id = pkt_id;
527 			++*free_count;
528 		}
529 		cur_seg = cur_seg->next;
530 	}
531 	DPAA2_SG_SET_FINAL(sge, true);
532 	return 0;
533 }
534 
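/* Build a contiguous FD from a single segment mbuf; buffer ownership is
 * handled as in eth_mbuf_to_sg_fd (invalid bpid for shared buffers,
 * software free list for external and indirect buffers).
 */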
535 static void
536 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
537 	       struct qbman_fd *fd,
538 	       struct sw_buf_free *buf_to_free,
539 	       uint32_t *free_count,
540 	       uint32_t pkt_id,
541 	       uint16_t bpid) __rte_unused;
542 
543 static void __rte_noinline __rte_hot
544 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
545 	       struct qbman_fd *fd,
546 	       struct sw_buf_free *buf_to_free,
547 	       uint32_t *free_count,
548 	       uint32_t pkt_id,
549 	       uint16_t bpid)
550 {
551 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
552 
553 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
554 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
555 		mbuf, mbuf->buf_addr, mbuf->data_off,
556 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
557 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
558 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
559 	if (RTE_MBUF_DIRECT(mbuf)) {
560 		if (rte_mbuf_refcnt_read(mbuf) > 1) {
561 			DPAA2_SET_FD_IVP(fd);
562 			rte_mbuf_refcnt_update(mbuf, -1);
563 		}
564 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
565 		else
566 			rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
567 				(void **)&mbuf, 1, 0);
568 #endif
569 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
570 		buf_to_free[*free_count].seg = mbuf;
571 		buf_to_free[*free_count].pkt_id = pkt_id;
572 		++*free_count;
573 		DPAA2_SET_FD_IVP(fd);
574 	} else {
575 		struct rte_mbuf *mi;
576 
577 		mi = rte_mbuf_from_indirect(mbuf);
578 		if (rte_mbuf_refcnt_read(mi) > 1)
579 			DPAA2_SET_FD_IVP(fd);
580 		else
581 			rte_mbuf_refcnt_update(mi, 1);
582 
583 		buf_to_free[*free_count].seg = mbuf;
584 		buf_to_free[*free_count].pkt_id = pkt_id;
585 		++*free_count;
586 	}
587 }
588 
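/* Fallback for mbufs that do not come from a DPAA2 backed mempool: copy the
 * packet into a buffer taken from the hardware pool identified by bpid and
 * build a contiguous FD over that copy.
 */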
589 static inline int __rte_hot
590 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
591 		    struct qbman_fd *fd, uint16_t bpid)
592 {
593 	struct rte_mbuf *m;
594 	void *mb = NULL;
595 
596 	if (rte_dpaa2_mbuf_alloc_bulk(
597 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
598 		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer");
599 		return -1;
600 	}
601 	m = (struct rte_mbuf *)mb;
602 	memcpy((char *)m->buf_addr + mbuf->data_off,
603 	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
604 		mbuf->pkt_len);
605 
606 	/* Copy required fields */
607 	m->data_off = mbuf->data_off;
608 	m->ol_flags = mbuf->ol_flags;
609 	m->packet_type = mbuf->packet_type;
610 	m->tx_offload = mbuf->tx_offload;
611 
612 	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
613 
614 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
615 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
616 		(void **)&m, 1, 0);
617 #endif
618 	DPAA2_PMD_DP_DEBUG(
619 		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
620 		" meta: %d, off: %d, len: %d",
621 		(void *)mbuf,
622 		mbuf->buf_addr,
623 		DPAA2_GET_FD_ADDR(fd),
624 		DPAA2_GET_FD_BPID(fd),
625 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
626 		DPAA2_GET_FD_OFFSET(fd),
627 		DPAA2_GET_FD_LEN(fd));
628 
629 	return 0;
630 }
631 
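/* Drain the Rx error queue and log each faulty frame together with its
 * frame annotation status (FAS), dumping the raw frame for debugging.
 */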
632 static void
633 dump_err_pkts(struct dpaa2_queue *dpaa2_q)
634 {
635 	/* Function receives error frames for a given device and VQ */
636 	struct qbman_result *dq_storage;
637 	uint32_t fqid = dpaa2_q->fqid;
638 	int ret, num_rx = 0;
639 	uint8_t pending, status;
640 	struct qbman_swp *swp;
641 	const struct qbman_fd *fd;
642 	struct qbman_pull_desc pulldesc;
643 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
644 	uint32_t lcore_id = rte_lcore_id();
645 	void *v_addr, *hw_annot_addr;
646 	struct dpaa2_fas *fas;
647 
648 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
649 		ret = dpaa2_affine_qbman_swp();
650 		if (ret) {
651 			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d",
652 				rte_gettid());
653 			return;
654 		}
655 	}
656 	swp = DPAA2_PER_LCORE_PORTAL;
657 
658 	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
659 	qbman_pull_desc_clear(&pulldesc);
660 	qbman_pull_desc_set_fq(&pulldesc, fqid);
661 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
662 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
663 	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
664 
665 	while (1) {
666 		if (qbman_swp_pull(swp, &pulldesc)) {
667 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy");
668 			/* Portal was busy, try again */
669 			continue;
670 		}
671 		break;
672 	}
673 
674 	/* Check if the previously issued command has completed. */
675 	while (!qbman_check_command_complete(dq_storage))
676 		;
677 
678 	pending = 1;
679 	do {
680 		/* Loop until the dq_storage is updated with
681 		 * new token by QBMAN
682 		 */
683 		while (!qbman_check_new_result(dq_storage))
684 			;
685 
686 		/* Check whether the last pull command has expired and
687 		 * set the condition for loop termination.
688 		 */
689 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
690 			pending = 0;
691 			/* Check for valid frame. */
692 			status = qbman_result_DQ_flags(dq_storage);
693 			if (unlikely((status &
694 				QBMAN_DQ_STAT_VALIDFRAME) == 0))
695 				continue;
696 		}
697 		fd = qbman_result_DQ_fd(dq_storage);
698 		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
699 		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
700 		fas = hw_annot_addr;
701 
702 		DPAA2_PMD_ERR("[%d] error packet on port[%d]:"
703 			" fd_off: %d, fd_err: %x, fas_status: %x",
704 			rte_lcore_id(), eth_data->port_id,
705 			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
706 			fas->status);
707 		rte_hexdump(stderr, "Error packet", v_addr,
708 			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
709 
710 		dq_storage++;
711 		num_rx++;
712 	} while (pending);
713 
714 	dpaa2_q->err_pkts += num_rx;
715 }
716 
717 /* This function assumes that the caller keeps the same nb_pkts value
718  * across calls for a given queue; if that is not the case, better use the
719  * non-prefetch version of the Rx call.
720  * It returns the packets requested in the previous call without honoring
721  * the current nb_pkts or the available bufs space.
722  */
723 uint16_t
724 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
725 {
726 	/* Function receives frames for a given device and VQ */
727 	struct dpaa2_queue *dpaa2_q = queue;
728 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
729 	uint32_t fqid = dpaa2_q->fqid;
730 	int ret, num_rx = 0, pull_size;
731 	uint8_t pending, status;
732 	struct qbman_swp *swp;
733 	const struct qbman_fd *fd;
734 	struct qbman_pull_desc pulldesc;
735 	struct queue_storage_info_t *q_storage;
736 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
737 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
738 
739 	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
740 
741 	if (unlikely(dpaa2_enable_err_queue))
742 		dump_err_pkts(priv->rx_err_vq);
743 
744 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
745 		ret = dpaa2_affine_qbman_ethrx_swp();
746 		if (ret) {
747 			DPAA2_PMD_ERR("Failure in affining portal");
748 			return 0;
749 		}
750 	}
751 
752 	if (unlikely(!rte_dpaa2_bpid_info &&
753 		     rte_eal_process_type() == RTE_PROC_SECONDARY))
754 		rte_dpaa2_bpid_info = dpaa2_q->bp_array;
755 
756 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
757 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
758 	if (unlikely(!q_storage->active_dqs)) {
759 		q_storage->toggle = 0;
760 		dq_storage = q_storage->dq_storage[q_storage->toggle];
761 		q_storage->last_num_pkts = pull_size;
762 		qbman_pull_desc_clear(&pulldesc);
763 		qbman_pull_desc_set_numframes(&pulldesc,
764 					      q_storage->last_num_pkts);
765 		qbman_pull_desc_set_fq(&pulldesc, fqid);
766 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
767 			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
768 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
769 			while (!qbman_check_command_complete(
770 			       get_swp_active_dqs(
771 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
772 				;
773 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
774 		}
775 		while (1) {
776 			if (qbman_swp_pull(swp, &pulldesc)) {
777 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
778 						  " QBMAN is busy (1)");
779 				/* Portal was busy, try again */
780 				continue;
781 			}
782 			break;
783 		}
784 		q_storage->active_dqs = dq_storage;
785 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
786 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
787 				   dq_storage);
788 	}
789 
790 	dq_storage = q_storage->active_dqs;
791 	rte_prefetch0((void *)(size_t)(dq_storage));
792 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
793 
794 	/* Prepare next pull descriptor. This will give space for the
795 	 * prefetching done on DQRR entries
796 	 */
797 	q_storage->toggle ^= 1;
798 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
799 	qbman_pull_desc_clear(&pulldesc);
800 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
801 	qbman_pull_desc_set_fq(&pulldesc, fqid);
802 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
803 		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
804 
805 	/* Check if the previously issued command has completed.
806 	 * The SWP also appears to be shared between the Ethernet driver
807 	 * and the SEC driver.
808 	 */
809 	while (!qbman_check_command_complete(dq_storage))
810 		;
811 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
812 		clear_swp_active_dqs(q_storage->active_dpio_id);
813 
814 	pending = 1;
815 
816 	do {
817 		/* Loop until the dq_storage is updated with
818 		 * new token by QBMAN
819 		 */
820 		while (!qbman_check_new_result(dq_storage))
821 			;
822 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
823 		/* Check whether the last pull command has expired and
824 		 * set the condition for loop termination.
825 		 */
826 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
827 			pending = 0;
828 			/* Check for valid frame. */
829 			status = qbman_result_DQ_flags(dq_storage);
830 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
831 				continue;
832 		}
833 		fd = qbman_result_DQ_fd(dq_storage);
834 
835 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
836 		if (dpaa2_svr_family != SVR_LX2160A) {
837 			const struct qbman_fd *next_fd =
838 				qbman_result_DQ_fd(dq_storage + 1);
839 			/* Prefetch Annotation address for the parse results */
840 			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
841 				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
842 		}
843 #endif
844 
845 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
846 			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
847 		else
848 			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
849 #if defined(RTE_LIBRTE_IEEE1588)
850 		if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
851 			priv->rx_timestamp =
852 				*dpaa2_timestamp_dynfield(bufs[num_rx]);
853 		}
854 #endif
855 
856 		if (eth_data->dev_conf.rxmode.offloads &
857 				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
858 			rte_vlan_strip(bufs[num_rx]);
859 
860 		dq_storage++;
861 		num_rx++;
862 	} while (pending);
863 
864 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
865 		while (!qbman_check_command_complete(
866 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
867 			;
868 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
869 	}
870 	/* issue a volatile dequeue command for next pull */
871 	while (1) {
872 		if (qbman_swp_pull(swp, &pulldesc)) {
873 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
874 					  " QBMAN is busy (2)");
875 			continue;
876 		}
877 		break;
878 	}
879 	q_storage->active_dqs = dq_storage1;
880 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
881 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
882 
883 	dpaa2_q->rx_pkts += num_rx;
884 
885 	return num_rx;
886 }
887 
888 void __rte_hot
889 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
890 				 const struct qbman_fd *fd,
891 				 const struct qbman_result *dq,
892 				 struct dpaa2_queue *rxq,
893 				 struct rte_event *ev)
894 {
895 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
896 		DPAA2_FD_PTA_SIZE + 16));
897 
898 	ev->flow_id = rxq->ev.flow_id;
899 	ev->sub_event_type = rxq->ev.sub_event_type;
900 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
901 	ev->op = RTE_EVENT_OP_NEW;
902 	ev->sched_type = rxq->ev.sched_type;
903 	ev->queue_id = rxq->ev.queue_id;
904 	ev->priority = rxq->ev.priority;
905 
906 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
907 
908 	qbman_swp_dqrr_consume(swp, dq);
909 }
910 
911 void __rte_hot
912 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
913 			       const struct qbman_fd *fd,
914 			       const struct qbman_result *dq,
915 			       struct dpaa2_queue *rxq,
916 			       struct rte_event *ev)
917 {
918 	uint8_t dqrr_index;
919 
920 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
921 		DPAA2_FD_PTA_SIZE + 16));
922 
923 	ev->flow_id = rxq->ev.flow_id;
924 	ev->sub_event_type = rxq->ev.sub_event_type;
925 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
926 	ev->op = RTE_EVENT_OP_NEW;
927 	ev->sched_type = rxq->ev.sched_type;
928 	ev->queue_id = rxq->ev.queue_id;
929 	ev->priority = rxq->ev.priority;
930 
931 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
932 
933 	dqrr_index = qbman_get_dqrr_idx(dq);
934 	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
935 	DPAA2_PER_LCORE_DQRR_SIZE++;
936 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
937 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
938 }
939 
940 void __rte_hot
941 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
942 				const struct qbman_fd *fd,
943 				const struct qbman_result *dq,
944 				struct dpaa2_queue *rxq,
945 				struct rte_event *ev)
946 {
947 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
948 		DPAA2_FD_PTA_SIZE + 16));
949 
950 	ev->flow_id = rxq->ev.flow_id;
951 	ev->sub_event_type = rxq->ev.sub_event_type;
952 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
953 	ev->op = RTE_EVENT_OP_NEW;
954 	ev->sched_type = rxq->ev.sched_type;
955 	ev->queue_id = rxq->ev.queue_id;
956 	ev->priority = rxq->ev.priority;
957 
958 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
959 
960 	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
961 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
962 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
963 
964 	qbman_swp_dqrr_consume(swp, dq);
965 }
966 
967 uint16_t
968 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
969 {
970 	/* Function receives frames for a given device and VQ */
971 	struct dpaa2_queue *dpaa2_q = queue;
972 	struct qbman_result *dq_storage;
973 	uint32_t fqid = dpaa2_q->fqid;
974 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
975 	uint8_t pending, status;
976 	struct qbman_swp *swp;
977 	const struct qbman_fd *fd;
978 	struct qbman_pull_desc pulldesc;
979 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
980 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
981 
982 	if (unlikely(dpaa2_enable_err_queue))
983 		dump_err_pkts(priv->rx_err_vq);
984 
985 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
986 		ret = dpaa2_affine_qbman_swp();
987 		if (ret) {
988 			DPAA2_PMD_ERR(
989 				"Failed to allocate IO portal, tid: %d",
990 				rte_gettid());
991 			return 0;
992 		}
993 	}
994 	swp = DPAA2_PER_LCORE_PORTAL;
995 
996 	do {
997 		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
998 		qbman_pull_desc_clear(&pulldesc);
999 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1000 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1001 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1002 
1003 		if (next_pull > dpaa2_dqrr_size) {
1004 			qbman_pull_desc_set_numframes(&pulldesc,
1005 				dpaa2_dqrr_size);
1006 			next_pull -= dpaa2_dqrr_size;
1007 		} else {
1008 			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
1009 			next_pull = 0;
1010 		}
1011 
1012 		while (1) {
1013 			if (qbman_swp_pull(swp, &pulldesc)) {
1014 				DPAA2_PMD_DP_DEBUG(
1015 					"VDQ command is not issued. QBMAN is busy");
1016 				/* Portal was busy, try again */
1017 				continue;
1018 			}
1019 			break;
1020 		}
1021 
1022 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1023 		/* Check if the previously issued command has completed. */
1024 		while (!qbman_check_command_complete(dq_storage))
1025 			;
1026 
1027 		num_pulled = 0;
1028 		pending = 1;
1029 		do {
1030 			/* Loop until the dq_storage is updated with
1031 			 * new token by QBMAN
1032 			 */
1033 			while (!qbman_check_new_result(dq_storage))
1034 				;
1035 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1036 			/* Check whether the last pull command has expired and
1037 			 * set the condition for loop termination.
1038 			 */
1039 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1040 				pending = 0;
1041 				/* Check for valid frame. */
1042 				status = qbman_result_DQ_flags(dq_storage);
1043 				if (unlikely((status &
1044 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
1045 					continue;
1046 			}
1047 			fd = qbman_result_DQ_fd(dq_storage);
1048 
1049 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
1050 			if (dpaa2_svr_family != SVR_LX2160A) {
1051 				const struct qbman_fd *next_fd =
1052 					qbman_result_DQ_fd(dq_storage + 1);
1053 
1054 				/* Prefetch Annotation address for the parse
1055 				 * results.
1056 				 */
1057 				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
1058 					DPAA2_GET_FD_ADDR(next_fd) +
1059 					DPAA2_FD_PTA_SIZE + 16)));
1060 			}
1061 #endif
1062 
1063 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
1064 				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
1065 							eth_data->port_id);
1066 			else
1067 				bufs[num_rx] = eth_fd_to_mbuf(fd,
1068 							eth_data->port_id);
1069 
1070 #if defined(RTE_LIBRTE_IEEE1588)
1071 			if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
1072 				priv->rx_timestamp =
1073 					*dpaa2_timestamp_dynfield(bufs[num_rx]);
1074 			}
1075 #endif
1076 
1077 			if (eth_data->dev_conf.rxmode.offloads &
1078 					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1079 				rte_vlan_strip(bufs[num_rx]);
1080 			}
1081 
1082 			dq_storage++;
1083 			num_rx++;
1084 			num_pulled++;
1085 		} while (pending);
1086 	/* Last VDQ provided all packets and more packets are requested */
1087 	} while (next_pull && num_pulled == dpaa2_dqrr_size);
1088 
1089 	dpaa2_q->rx_pkts += num_rx;
1090 
1091 	return num_rx;
1092 }
1093 
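/* Drain the Tx confirmation queue: release the transmitted buffers back to
 * their buffer pools and, when IEEE 1588 is enabled, record the Tx
 * timestamp from the frame annotation.
 */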
1094 uint16_t dpaa2_dev_tx_conf(void *queue)
1095 {
1096 	/* Function receives Tx confirmation frames for a given device and VQ */
1097 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1098 	struct qbman_result *dq_storage;
1099 	uint32_t fqid = dpaa2_q->fqid;
1100 	int ret, num_tx_conf = 0, num_pulled;
1101 	uint8_t pending, status;
1102 	struct qbman_swp *swp;
1103 	const struct qbman_fd *fd, *next_fd;
1104 	struct qbman_pull_desc pulldesc;
1105 	struct qbman_release_desc releasedesc;
1106 	uint32_t bpid;
1107 	uint64_t buf;
1108 #if defined(RTE_LIBRTE_IEEE1588)
1109 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1110 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1111 	struct dpaa2_annot_hdr *annotation;
1112 	void *v_addr;
1113 	struct rte_mbuf *mbuf;
1114 #endif
1115 
1116 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1117 		ret = dpaa2_affine_qbman_swp();
1118 		if (ret) {
1119 			DPAA2_PMD_ERR(
1120 				"Failed to allocate IO portal, tid: %d",
1121 				rte_gettid());
1122 			return 0;
1123 		}
1124 	}
1125 	swp = DPAA2_PER_LCORE_PORTAL;
1126 
1127 	do {
1128 		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
1129 		qbman_pull_desc_clear(&pulldesc);
1130 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1131 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1132 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1133 
1134 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
1135 
1136 		while (1) {
1137 			if (qbman_swp_pull(swp, &pulldesc)) {
1138 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1139 						   " QBMAN is busy");
1140 				/* Portal was busy, try again */
1141 				continue;
1142 			}
1143 			break;
1144 		}
1145 
1146 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1147 		/* Check if the previously issued command has completed. */
1148 		while (!qbman_check_command_complete(dq_storage))
1149 			;
1150 
1151 		num_pulled = 0;
1152 		pending = 1;
1153 		do {
1154 			/* Loop until the dq_storage is updated with
1155 			 * new token by QBMAN
1156 			 */
1157 			while (!qbman_check_new_result(dq_storage))
1158 				;
1159 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1160 			/* Check whether the last pull command has expired and
1161 			 * set the condition for loop termination.
1162 			 */
1163 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1164 				pending = 0;
1165 				/* Check for valid frame. */
1166 				status = qbman_result_DQ_flags(dq_storage);
1167 				if (unlikely((status &
1168 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
1169 					continue;
1170 			}
1171 			fd = qbman_result_DQ_fd(dq_storage);
1172 
1173 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
1174 			/* Prefetch Annotation address for the parse results */
1175 			rte_prefetch0((void *)(size_t)
1176 				(DPAA2_GET_FD_ADDR(next_fd) +
1177 				 DPAA2_FD_PTA_SIZE + 16));
1178 
1179 			bpid = DPAA2_GET_FD_BPID(fd);
1180 
1181 			/* Create a release descriptor required for releasing
1182 			 * buffers into QBMAN
1183 			 */
1184 			qbman_release_desc_clear(&releasedesc);
1185 			qbman_release_desc_set_bpid(&releasedesc, bpid);
1186 
1187 			buf = DPAA2_GET_FD_ADDR(fd);
1188 			/* feed them to bman */
1189 			do {
1190 				ret = qbman_swp_release(swp, &releasedesc,
1191 							&buf, 1);
1192 			} while (ret == -EBUSY);
1193 
1194 			dq_storage++;
1195 			num_tx_conf++;
1196 			num_pulled++;
1197 #if defined(RTE_LIBRTE_IEEE1588)
1198 			v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1199 			mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
1200 				rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1201 
1202 			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1203 				annotation = (struct dpaa2_annot_hdr *)((size_t)
1204 					DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1205 					DPAA2_FD_PTA_SIZE);
1206 				priv->tx_timestamp = annotation->word2;
1207 			}
1208 #endif
1209 		} while (pending);
1210 
1211 	/* Last VDQ provided all packets and more packets are requested */
1212 	} while (num_pulled == dpaa2_dqrr_size);
1213 
1214 	dpaa2_q->rx_pkts += num_tx_conf;
1215 
1216 	return num_tx_conf;
1217 }
1218 
1219 /* Configure the egress frame annotation for timestamp update */
1220 static void enable_tx_tstamp(struct qbman_fd *fd)
1221 {
1222 	struct dpaa2_faead *fd_faead;
1223 
1224 	/* Set frame annotation status field as valid */
1225 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1226 
1227 	/* Set frame annotation egress action descriptor as valid */
1228 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1229 
1230 	/* Set Annotation Length as 128B */
1231 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1232 
1233 	/* enable update of confirmation frame annotation */
1234 	fd_faead = (struct dpaa2_faead *)((size_t)
1235 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1236 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1237 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1238 				DPAA2_ANNOT_FAEAD_UPD;
1239 }
1240 
1241 /*
1242  * Callback to handle sending packets through WRIOP based interface
1243  */
1244 uint16_t
1245 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1246 {
1247 	/* Function to transmit the frames to the given device and VQ */
1248 	uint32_t loop, retry_count;
1249 	int32_t ret;
1250 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1251 	struct rte_mbuf *mi;
1252 	uint32_t frames_to_send;
1253 	struct rte_mempool *mp;
1254 	struct qbman_eq_desc eqdesc;
1255 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1256 	struct qbman_swp *swp;
1257 	uint16_t num_tx = 0;
1258 	uint16_t bpid;
1259 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1260 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1261 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1262 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1263 	uint32_t free_count = 0;
1264 
1265 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1266 		ret = dpaa2_affine_qbman_swp();
1267 		if (ret) {
1268 			DPAA2_PMD_ERR(
1269 				"Failed to allocate IO portal, tid: %d",
1270 				rte_gettid());
1271 			return 0;
1272 		}
1273 	}
1274 	swp = DPAA2_PER_LCORE_PORTAL;
1275 
1276 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
1277 			eth_data, dpaa2_q->fqid);
1278 
1279 #ifdef RTE_LIBRTE_IEEE1588
1280 	/* The IEEE 1588 driver needs a pointer to the Tx confirmation queue
1281 	 * corresponding to the last transmitted packet in order to read
1282 	 * the timestamp.
1283 	 */
1284 	if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1285 		priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1286 		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1287 		priv->tx_timestamp = 0;
1288 	}
1289 #endif
1290 
1291 	/*Prepare enqueue descriptor*/
1292 	qbman_eq_desc_clear(&eqdesc);
1293 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1294 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1295 
1296 	/*Clear the unused FD fields before sending*/
1297 	while (nb_pkts) {
1298 		/*Check if the queue is congested*/
1299 		retry_count = 0;
1300 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1301 			retry_count++;
1302 			/* Retry for some time before giving up */
1303 			if (retry_count > CONG_RETRY_COUNT) {
1304 				if (dpaa2_q->tm_sw_td)
1305 					goto sw_td;
1306 				goto skip_tx;
1307 			}
1308 		}
1309 
1310 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1311 			dpaa2_eqcr_size : nb_pkts;
1312 
1313 		for (loop = 0; loop < frames_to_send; loop++) {
1314 			if (*dpaa2_seqn(*bufs)) {
1315 				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1316 
1317 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1318 						dqrr_index;
1319 				DPAA2_PER_LCORE_DQRR_SIZE--;
1320 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1321 				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1322 			}
1323 
1324 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1325 				mp = (*bufs)->pool;
1326 				/* Check the basic scenario and set
1327 				 * the FD appropriately here itself.
1328 				 */
1329 				if (likely(mp && mp->ops_index ==
1330 				    priv->bp_list->dpaa2_ops_index &&
1331 				    (*bufs)->nb_segs == 1 &&
1332 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1333 					if (unlikely(((*bufs)->ol_flags
1334 						& RTE_MBUF_F_TX_VLAN) ||
1335 						(eth_data->dev_conf.txmode.offloads
1336 						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1337 						ret = rte_vlan_insert(bufs);
1338 						if (ret)
1339 							goto send_n_return;
1340 					}
1341 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1342 					&fd_arr[loop], mempool_to_bpid(mp));
1343 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1344 					rte_mempool_check_cookies
1345 						(rte_mempool_from_obj((void *)*bufs),
1346 						(void **)bufs, 1, 0);
1347 #endif
1348 					bufs++;
1349 #ifdef RTE_LIBRTE_IEEE1588
1350 					enable_tx_tstamp(&fd_arr[loop]);
1351 #endif
1352 					continue;
1353 				}
1354 			} else {
1355 				mi = rte_mbuf_from_indirect(*bufs);
1356 				mp = mi->pool;
1357 			}
1358 
1359 			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1360 				if (unlikely((*bufs)->nb_segs > 1)) {
1361 					mp = (*bufs)->pool;
1362 					if (eth_mbuf_to_sg_fd(*bufs,
1363 							      &fd_arr[loop],
1364 							      buf_to_free,
1365 							      &free_count,
1366 							      loop,
1367 							      mempool_to_bpid(mp)))
1368 						goto send_n_return;
1369 				} else {
1370 					eth_mbuf_to_fd(*bufs,
1371 							&fd_arr[loop],
1372 							buf_to_free,
1373 							&free_count,
1374 							loop, 0);
1375 				}
1376 				bufs++;
1377 #ifdef RTE_LIBRTE_IEEE1588
1378 				enable_tx_tstamp(&fd_arr[loop]);
1379 #endif
1380 				continue;
1381 			}
1382 
1383 			/* Not a hw_pkt pool allocated frame */
1384 			if (unlikely(!mp || !priv->bp_list)) {
1385 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1386 				goto send_n_return;
1387 			}
1388 
1389 			if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
1390 				(eth_data->dev_conf.txmode.offloads
1391 				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1392 				int ret = rte_vlan_insert(bufs);
1393 				if (ret)
1394 					goto send_n_return;
1395 			}
1396 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1397 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1398 				/* alloc should be from the default buffer pool
1399 				 * attached to this interface
1400 				 */
1401 				bpid = priv->bp_list->buf_pool.bpid;
1402 
1403 				if (unlikely((*bufs)->nb_segs > 1)) {
1404 					DPAA2_PMD_ERR("S/G support not added"
1405 						" for non hw offload buffer");
1406 					goto send_n_return;
1407 				}
1408 				if (eth_copy_mbuf_to_fd(*bufs,
1409 							&fd_arr[loop], bpid)) {
1410 					goto send_n_return;
1411 				}
1412 				/* free the original packet */
1413 				rte_pktmbuf_free(*bufs);
1414 			} else {
1415 				bpid = mempool_to_bpid(mp);
1416 				if (unlikely((*bufs)->nb_segs > 1)) {
1417 					if (eth_mbuf_to_sg_fd(*bufs,
1418 							&fd_arr[loop],
1419 							buf_to_free,
1420 							&free_count,
1421 							loop,
1422 							bpid))
1423 						goto send_n_return;
1424 				} else {
1425 					eth_mbuf_to_fd(*bufs,
1426 							&fd_arr[loop],
1427 							buf_to_free,
1428 							&free_count,
1429 							loop, bpid);
1430 				}
1431 			}
1432 #ifdef RTE_LIBRTE_IEEE1588
1433 			enable_tx_tstamp(&fd_arr[loop]);
1434 #endif
1435 			bufs++;
1436 		}
1437 
1438 		loop = 0;
1439 		retry_count = 0;
1440 		while (loop < frames_to_send) {
1441 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1442 					&fd_arr[loop], &flags[loop],
1443 					frames_to_send - loop);
1444 			if (unlikely(ret < 0)) {
1445 				retry_count++;
1446 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1447 					num_tx += loop;
1448 					nb_pkts -= loop;
1449 					goto send_n_return;
1450 				}
1451 			} else {
1452 				loop += ret;
1453 				retry_count = 0;
1454 			}
1455 		}
1456 
1457 		num_tx += loop;
1458 		nb_pkts -= loop;
1459 	}
1460 	dpaa2_q->tx_pkts += num_tx;
1461 
1462 	for (loop = 0; loop < free_count; loop++) {
1463 		if (buf_to_free[loop].pkt_id < num_tx)
1464 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1465 	}
1466 
1467 	return num_tx;
1468 
1469 send_n_return:
1470 	/* send any already prepared fd */
1471 	if (loop) {
1472 		unsigned int i = 0;
1473 
1474 		retry_count = 0;
1475 		while (i < loop) {
1476 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1477 							 &fd_arr[i],
1478 							 &flags[i],
1479 							 loop - i);
1480 			if (unlikely(ret < 0)) {
1481 				retry_count++;
1482 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1483 					break;
1484 			} else {
1485 				i += ret;
1486 				retry_count = 0;
1487 			}
1488 		}
1489 		num_tx += i;
1490 	}
1491 skip_tx:
1492 	dpaa2_q->tx_pkts += num_tx;
1493 
1494 	for (loop = 0; loop < free_count; loop++) {
1495 		if (buf_to_free[loop].pkt_id < num_tx)
1496 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1497 	}
1498 
1499 	return num_tx;
1500 sw_td:
1501 	loop = 0;
1502 	while (loop < num_tx) {
1503 		if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs)))
1504 			rte_pktmbuf_free(*bufs);
1505 		bufs++;
1506 		loop++;
1507 	}
1508 
1509 	/* free the pending buffers */
1510 	while (nb_pkts) {
1511 		rte_pktmbuf_free(*bufs);
1512 		bufs++;
1513 		nb_pkts--;
1514 		num_tx++;
1515 	}
1516 	dpaa2_q->tx_pkts += num_tx;
1517 
1518 	return num_tx;
1519 }
1520 
1521 void
1522 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
1523 			  __rte_unused struct dpaa2_queue *dpaa2_q)
1524 {
1525 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1526 	struct qbman_fd *fd;
1527 	struct rte_mbuf *m;
1528 
1529 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1530 
1531 	/* Setting the port id does not matter as we are going to free the mbuf */
1532 	m = eth_fd_to_mbuf(fd, 0);
1533 	rte_pktmbuf_free(m);
1534 }
1535 
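/* Fill an enqueue descriptor for order restoration or atomic traffic:
 * ORP marked frames get an order restoration point (plus an enqueue
 * response entry when loose ordering is disabled), while DQRR held frames
 * are enqueued with DCA so that their DQRR entry is consumed on enqueue.
 */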
1536 static void
1537 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1538 			     struct rte_mbuf *m,
1539 			     struct qbman_eq_desc *eqdesc)
1540 {
1541 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1542 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1543 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1544 	struct eqresp_metadata *eqresp_meta;
1545 	uint16_t orpid, seqnum;
1546 	uint8_t dq_idx;
1547 
1548 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1549 
1550 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1551 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1552 			DPAA2_EQCR_OPRID_SHIFT;
1553 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1554 			DPAA2_EQCR_SEQNUM_SHIFT;
1555 
1556 		if (!priv->en_loose_ordered) {
1557 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1558 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1559 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1560 				dpio_dev->eqresp_pi]), 1);
1561 			qbman_eq_desc_set_token(eqdesc, 1);
1562 
1563 			eqresp_meta = &dpio_dev->eqresp_meta[
1564 				dpio_dev->eqresp_pi];
1565 			eqresp_meta->dpaa2_q = dpaa2_q;
1566 			eqresp_meta->mp = m->pool;
1567 
1568 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1569 				dpio_dev->eqresp_pi++ :
1570 				(dpio_dev->eqresp_pi = 0);
1571 		} else {
1572 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1573 		}
1574 	} else {
1575 		dq_idx = *dpaa2_seqn(m) - 1;
1576 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1577 		DPAA2_PER_LCORE_DQRR_SIZE--;
1578 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1579 	}
1580 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1581 }
1582 
1583 uint16_t
1584 dpaa2_dev_tx_multi_txq_ordered(void **queue,
1585 		struct rte_mbuf **bufs, uint16_t nb_pkts)
1586 {
1587 	/* Function to transmit each frame to its respective queue (queue[i] for bufs[i]). */
1588 	uint32_t loop, i, retry_count;
1589 	int32_t ret;
1590 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1591 	uint32_t frames_to_send, num_free_eq_desc = 0;
1592 	struct rte_mempool *mp;
1593 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1594 	struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
1595 	struct qbman_swp *swp;
1596 	uint16_t bpid;
1597 	struct rte_mbuf *mi;
1598 	struct rte_eth_dev_data *eth_data;
1599 	struct dpaa2_dev_priv *priv;
1600 	struct dpaa2_queue *order_sendq;
1601 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1602 	uint32_t free_count = 0;
1603 
1604 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1605 		ret = dpaa2_affine_qbman_swp();
1606 		if (ret) {
1607 			DPAA2_PMD_ERR(
1608 				"Failed to allocate IO portal, tid: %d",
1609 				rte_gettid());
1610 			return 0;
1611 		}
1612 	}
1613 	swp = DPAA2_PER_LCORE_PORTAL;
1614 
1615 	frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1616 		dpaa2_eqcr_size : nb_pkts;
1617 
1618 	for (loop = 0; loop < frames_to_send; loop++) {
1619 		dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
1620 		eth_data = dpaa2_q[loop]->eth_data;
1621 		priv = eth_data->dev_private;
1622 		if (!priv->en_loose_ordered) {
1623 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1624 				if (!num_free_eq_desc) {
1625 					num_free_eq_desc = dpaa2_free_eq_descriptors();
1626 					if (!num_free_eq_desc)
1627 						goto send_frames;
1628 				}
1629 				num_free_eq_desc--;
1630 			}
1631 		}
1632 
1633 		DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
1634 				   eth_data, dpaa2_q[loop]->fqid);
1635 
1636 		/* Check if the queue is congested */
1637 		retry_count = 0;
1638 		while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
1639 			retry_count++;
1640 			/* Retry for some time before giving up */
1641 			if (retry_count > CONG_RETRY_COUNT)
1642 				goto send_frames;
1643 		}
1644 
1645 		/* Prepare enqueue descriptor */
1646 		qbman_eq_desc_clear(&eqdesc[loop]);
1647 
1648 		if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
1649 			order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1650 			dpaa2_set_enqueue_descriptor(order_sendq,
1651 						     (*bufs),
1652 						     &eqdesc[loop]);
1653 		} else {
1654 			qbman_eq_desc_set_no_orp(&eqdesc[loop],
1655 							 DPAA2_EQ_RESP_ERR_FQ);
1656 			qbman_eq_desc_set_fq(&eqdesc[loop],
1657 						     dpaa2_q[loop]->fqid);
1658 		}
1659 
1660 		if (likely(RTE_MBUF_DIRECT(*bufs))) {
1661 			mp = (*bufs)->pool;
1662 			/* Check the basic scenario and set
1663 			 * the FD appropriately here itself.
1664 			 */
1665 			if (likely(mp && mp->ops_index ==
1666 				priv->bp_list->dpaa2_ops_index &&
1667 				(*bufs)->nb_segs == 1 &&
1668 				rte_mbuf_refcnt_read((*bufs)) == 1)) {
1669 				if (unlikely((*bufs)->ol_flags
1670 					& RTE_MBUF_F_TX_VLAN)) {
1671 					ret = rte_vlan_insert(bufs);
1672 					if (ret)
1673 						goto send_frames;
1674 				}
1675 				DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1676 					&fd_arr[loop],
1677 					mempool_to_bpid(mp));
1678 				bufs++;
1679 				continue;
1680 			}
1681 		} else {
1682 			mi = rte_mbuf_from_indirect(*bufs);
1683 			mp = mi->pool;
1684 		}
1685 		/* Not a hw_pkt pool allocated frame */
1686 		if (unlikely(!mp || !priv->bp_list)) {
1687 			DPAA2_PMD_ERR("Err: No buffer pool attached");
1688 			goto send_frames;
1689 		}
1690 
1691 		if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1692 			DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1693 			/* alloc should be from the default buffer pool
1694 			 * attached to this interface
1695 			 */
1696 			bpid = priv->bp_list->buf_pool.bpid;
1697 
1698 			if (unlikely((*bufs)->nb_segs > 1)) {
1699 				DPAA2_PMD_ERR(
1700 					"S/G not supp for non hw offload buffer");
1701 				goto send_frames;
1702 			}
1703 			if (eth_copy_mbuf_to_fd(*bufs,
1704 						&fd_arr[loop], bpid)) {
1705 				goto send_frames;
1706 			}
1707 			/* free the original packet */
1708 			rte_pktmbuf_free(*bufs);
1709 		} else {
1710 			bpid = mempool_to_bpid(mp);
1711 			if (unlikely((*bufs)->nb_segs > 1)) {
1712 				if (eth_mbuf_to_sg_fd(*bufs,
1713 						      &fd_arr[loop],
1714 						      buf_to_free,
1715 						      &free_count,
1716 						      loop,
1717 						      bpid))
1718 					goto send_frames;
1719 			} else {
1720 				eth_mbuf_to_fd(*bufs,
1721 						&fd_arr[loop],
1722 						buf_to_free,
1723 						&free_count,
1724 						loop, bpid);
1725 			}
1726 		}
1727 
1728 		bufs++;
1729 	}
1730 
1731 send_frames:
1732 	frames_to_send = loop;
1733 	loop = 0;
1734 	retry_count = 0;
1735 	while (loop < frames_to_send) {
1736 		ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
1737 				&fd_arr[loop],
1738 				frames_to_send - loop);
1739 		if (likely(ret > 0)) {
1740 			loop += ret;
1741 			retry_count = 0;
1742 		} else {
1743 			retry_count++;
1744 			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1745 				break;
1746 		}
1747 	}
1748 
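	/* Free the software-tracked segments only for frames that were
	 * actually accepted by hardware; unsent frames stay with the caller.
	 */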
1749 	for (i = 0; i < free_count; i++) {
1750 		if (buf_to_free[i].pkt_id < loop)
1751 			rte_pktmbuf_free_seg(buf_to_free[i].seg);
1752 	}
1753 	return loop;
1754 }
1755 
1756 /* Callback to handle sending ordered packets through a WRIOP-based interface */
1757 uint16_t
1758 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1759 {
1760 	/* Function to transmit the frames to the given device and VQ */
1761 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1762 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1763 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1764 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1765 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1766 	struct rte_mbuf *mi;
1767 	struct rte_mempool *mp;
1768 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1769 	struct qbman_swp *swp;
1770 	uint32_t frames_to_send, num_free_eq_desc;
1771 	uint32_t loop, retry_count;
1772 	int32_t ret;
1773 	uint16_t num_tx = 0;
1774 	uint16_t bpid;
1775 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1776 	uint32_t free_count = 0;
1777 
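	/* Affine a QBMAN software portal to this lcore on first use; all
	 * enqueues below go through this per-lcore portal.
	 */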
1778 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1779 		ret = dpaa2_affine_qbman_swp();
1780 		if (ret) {
1781 			DPAA2_PMD_ERR(
1782 				"Failed to allocate IO portal, tid: %d",
1783 				rte_gettid());
1784 			return 0;
1785 		}
1786 	}
1787 	swp = DPAA2_PER_LCORE_PORTAL;
1788 
1789 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
1790 			   eth_data, dpaa2_q->fqid);
1791 
1792 	/* This also handles normal and atomic queues, as any type
1793 	 * of packet can be enqueued when ordered queues are in use.
1794 	 */
1795 	while (nb_pkts) {
1796 		/* Check if the queue is congested */
1797 		retry_count = 0;
1798 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1799 			retry_count++;
1800 			/* Retry for some time before giving up */
1801 			if (retry_count > CONG_RETRY_COUNT)
1802 				goto skip_tx;
1803 		}
1804 
1805 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1806 			dpaa2_eqcr_size : nb_pkts;
1807 
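		/* In strict ordering mode, ORP-flagged frames each need an
		 * enqueue descriptor with response, so cap the burst at the
		 * number of descriptors currently free.
		 */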
1808 		if (!priv->en_loose_ordered) {
1809 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1810 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1811 				if (num_free_eq_desc < frames_to_send)
1812 					frames_to_send = num_free_eq_desc;
1813 			}
1814 		}
1815 
1816 		for (loop = 0; loop < frames_to_send; loop++) {
1817 			/* Prepare the enqueue descriptor */
1818 			qbman_eq_desc_clear(&eqdesc[loop]);
1819 
1820 			if (*dpaa2_seqn(*bufs)) {
1821 				/* Use only Tx queue 0 for atomic/ordered
1822 				 * packets, as they can become unordered when
1823 				 * being transmitted out of the interface.
1824 				 */
1825 				dpaa2_set_enqueue_descriptor(order_sendq,
1826 							     (*bufs),
1827 							     &eqdesc[loop]);
1828 			} else {
1829 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1830 							 DPAA2_EQ_RESP_ERR_FQ);
1831 				qbman_eq_desc_set_fq(&eqdesc[loop],
1832 						     dpaa2_q->fqid);
1833 			}
1834 
1835 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1836 				mp = (*bufs)->pool;
1837 				/* Check for the basic (fast-path) scenario
1838 				 * and set up the FD for the mbuf right here.
1839 				 */
1840 				if (likely(mp && mp->ops_index ==
1841 				    priv->bp_list->dpaa2_ops_index &&
1842 				    (*bufs)->nb_segs == 1 &&
1843 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1844 					if (unlikely((*bufs)->ol_flags
1845 						& RTE_MBUF_F_TX_VLAN)) {
1846 						ret = rte_vlan_insert(bufs);
1847 						if (ret)
1848 							goto send_n_return;
1849 					}
1850 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1851 						&fd_arr[loop],
1852 						mempool_to_bpid(mp));
1853 					bufs++;
1854 					continue;
1855 				}
1856 			} else {
1857 				mi = rte_mbuf_from_indirect(*bufs);
1858 				mp = mi->pool;
1859 			}
1860 			/* Frame was not allocated from a hw (DPAA2) packet pool */
1861 			if (unlikely(!mp || !priv->bp_list)) {
1862 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1863 				goto send_n_return;
1864 			}
1865 
1866 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1867 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1868 				/* Allocate from the default buffer pool
1869 				 * attached to this interface.
1870 				 */
1871 				bpid = priv->bp_list->buf_pool.bpid;
1872 
1873 				if (unlikely((*bufs)->nb_segs > 1)) {
1874 					DPAA2_PMD_ERR(
1875 						"S/G not supp for non hw offload buffer");
1876 					goto send_n_return;
1877 				}
1878 				if (eth_copy_mbuf_to_fd(*bufs,
1879 							&fd_arr[loop], bpid)) {
1880 					goto send_n_return;
1881 				}
1882 				/* free the original packet */
1883 				rte_pktmbuf_free(*bufs);
1884 			} else {
1885 				bpid = mempool_to_bpid(mp);
1886 				if (unlikely((*bufs)->nb_segs > 1)) {
1887 					if (eth_mbuf_to_sg_fd(*bufs,
1888 							      &fd_arr[loop],
1889 							      buf_to_free,
1890 							      &free_count,
1891 							      loop,
1892 							      bpid))
1893 						goto send_n_return;
1894 				} else {
1895 					eth_mbuf_to_fd(*bufs,
1896 							&fd_arr[loop],
1897 							buf_to_free,
1898 							&free_count,
1899 							loop, bpid);
1900 				}
1901 			}
1902 			bufs++;
1903 		}
1904 
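		/* Push the prepared burst to hardware. Continue until every
		 * frame is accepted or the portal stays busy for more than
		 * DPAA2_MAX_TX_RETRY_COUNT consecutive attempts.
		 */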
1905 		loop = 0;
1906 		retry_count = 0;
1907 		while (loop < frames_to_send) {
1908 			ret = qbman_swp_enqueue_multiple_desc(swp,
1909 					&eqdesc[loop], &fd_arr[loop],
1910 					frames_to_send - loop);
1911 			if (unlikely(ret < 0)) {
1912 				retry_count++;
1913 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1914 					num_tx += loop;
1915 					nb_pkts -= loop;
1916 					goto send_n_return;
1917 				}
1918 			} else {
1919 				loop += ret;
1920 				retry_count = 0;
1921 			}
1922 		}
1923 
1924 		num_tx += loop;
1925 		nb_pkts -= loop;
1926 	}
1927 	dpaa2_q->tx_pkts += num_tx;
1928 	for (loop = 0; loop < free_count; loop++) {
1929 		if (buf_to_free[loop].pkt_id < num_tx)
1930 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1931 	}
1932 
1933 	return num_tx;
1934 
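	/* Partial-burst exit: flush any FDs already prepared in this burst
	 * before updating statistics and returning to the caller.
	 */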
1935 send_n_return:
1936 	/* Send any FDs that were already prepared */
1937 	if (loop) {
1938 		unsigned int i = 0;
1939 
1940 		retry_count = 0;
1941 		while (i < loop) {
1942 			ret = qbman_swp_enqueue_multiple_desc(swp,
1943 				       &eqdesc[i], &fd_arr[i], loop - i);
1944 			if (unlikely(ret < 0)) {
1945 				retry_count++;
1946 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1947 					break;
1948 			} else {
1949 				i += ret;
1950 				retry_count = 0;
1951 			}
1952 		}
1953 		num_tx += i;
1954 	}
1955 skip_tx:
1956 	dpaa2_q->tx_pkts += num_tx;
1957 	for (loop = 0; loop < free_count; loop++) {
1958 		if (buf_to_free[loop].pkt_id < num_tx)
1959 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1960 	}
1961 
1962 	return num_tx;
1963 }
1964 
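/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * reach this ordered Tx path through the regular ethdev burst API. The
 * port_id and queue_id below are hypothetical.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, 32);
 *
 * Frames beyond nb_sent remain owned by the caller and may be retried
 * later or freed with rte_pktmbuf_free().
 */
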
1965 /* This function loops back all the received packets. */
1966 uint16_t
1967 dpaa2_dev_loopback_rx(void *queue,
1968 		      struct rte_mbuf **bufs __rte_unused,
1969 		      uint16_t nb_pkts)
1970 {
1971 	/* Function to receive frames for a given device and VQ */
1972 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1973 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1974 	uint32_t fqid = dpaa2_q->fqid;
1975 	int ret, num_rx = 0, num_tx = 0, pull_size;
1976 	uint8_t pending, status;
1977 	struct qbman_swp *swp;
1978 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1979 	struct qbman_pull_desc pulldesc;
1980 	struct qbman_eq_desc eqdesc;
1981 	struct queue_storage_info_t *q_storage;
1982 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1983 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1984 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1985 	/* TODO: currently only the first Tx queue is used for loopback */
1986 
1987 	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
1988 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1989 		ret = dpaa2_affine_qbman_ethrx_swp();
1990 		if (ret) {
1991 			DPAA2_PMD_ERR("Failure in affining portal");
1992 			return 0;
1993 		}
1994 	}
1995 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1996 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
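	/* Dequeues use two dq_storage buffers that are toggled between pulls:
	 * while the results in the active buffer are processed, the next
	 * volatile dequeue is issued into the other one.
	 */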
1997 	if (unlikely(!q_storage->active_dqs)) {
1998 		q_storage->toggle = 0;
1999 		dq_storage = q_storage->dq_storage[q_storage->toggle];
2000 		q_storage->last_num_pkts = pull_size;
2001 		qbman_pull_desc_clear(&pulldesc);
2002 		qbman_pull_desc_set_numframes(&pulldesc,
2003 					      q_storage->last_num_pkts);
2004 		qbman_pull_desc_set_fq(&pulldesc, fqid);
2005 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
2006 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
2007 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
2008 			while (!qbman_check_command_complete(
2009 			       get_swp_active_dqs(
2010 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
2011 				;
2012 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
2013 		}
2014 		while (1) {
2015 			if (qbman_swp_pull(swp, &pulldesc)) {
2016 				DPAA2_PMD_DP_DEBUG(
2017 					"VDQ command not issued. QBMAN busy");
2018 				/* Portal was busy, try again */
2019 				continue;
2020 			}
2021 			break;
2022 		}
2023 		q_storage->active_dqs = dq_storage;
2024 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2025 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
2026 				   dq_storage);
2027 	}
2028 
2029 	dq_storage = q_storage->active_dqs;
2030 	rte_prefetch0((void *)(size_t)(dq_storage));
2031 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
2032 
2033 	/* Prepare the next pull descriptor. This leaves room for the
2034 	 * prefetching done on the DQRR entries.
2035 	 */
2036 	q_storage->toggle ^= 1;
2037 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
2038 	qbman_pull_desc_clear(&pulldesc);
2039 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
2040 	qbman_pull_desc_set_fq(&pulldesc, fqid);
2041 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
2042 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
2043 
2044 	/* Prepare the enqueue descriptor for looping frames back out via tx_q */
2045 	qbman_eq_desc_clear(&eqdesc);
2046 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
2047 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
2048 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
2049 
2050 	/* Check if the previously issued command has completed.
2051 	 * Note that the SWP appears to be shared between the Ethernet
2052 	 * driver and the SEC driver.
2053 	 */
2054 	while (!qbman_check_command_complete(dq_storage))
2055 		;
2056 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
2057 		clear_swp_active_dqs(q_storage->active_dpio_id);
2058 
2059 	pending = 1;
2060 
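	/* Walk the dequeue results as QBMAN delivers them, collecting the FD
	 * of every valid frame so it can be enqueued back out below.
	 */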
2061 	do {
2062 		/* Loop until QBMAN updates dq_storage with
2063 		 * a new token.
2064 		 */
2065 		while (!qbman_check_new_result(dq_storage))
2066 			;
2067 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
2068 		/* Check whether the last pull command has expired and
2069 		 * set the condition for loop termination.
2070 		 */
2071 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
2072 			pending = 0;
2073 			/* Check for valid frame. */
2074 			status = qbman_result_DQ_flags(dq_storage);
2075 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
2076 				continue;
2077 		}
2078 		fd[num_rx] = RTE_PTR_UNQUAL(qbman_result_DQ_fd(dq_storage));
2079 
2080 		dq_storage++;
2081 		num_rx++;
2082 	} while (pending);
2083 
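	/* Re-enqueue every dequeued FD to the Tx FQ; the portal may accept
	 * fewer FDs than requested, so loop until all of them are sent.
	 */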
2084 	while (num_tx < num_rx) {
2085 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
2086 				&fd[num_tx], 0, num_rx - num_tx);
2087 	}
2088 
2089 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
2090 		while (!qbman_check_command_complete(
2091 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
2092 			;
2093 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
2094 	}
2095 	/* Issue a volatile dequeue command for the next pull */
2096 	while (1) {
2097 		if (qbman_swp_pull(swp, &pulldesc)) {
2098 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
2099 					  "QBMAN is busy (2)");
2100 			continue;
2101 		}
2102 		break;
2103 	}
2104 	q_storage->active_dqs = dq_storage1;
2105 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2106 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
2107 
2108 	dpaa2_q->rx_pkts += num_rx;
2109 	dpaa2_q->tx_pkts += num_tx;
2110 
2111 	return 0;
2112 }
2113
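/*
 * Note: this loopback Rx handler is only used when the PMD is started in
 * loopback mode. One way to request it (assumed here from the PMD's
 * documented devargs, and possibly platform dependent) is an fslmc bus
 * device argument such as:
 *
 *	dpdk-testpmd -a fslmc:dpni.1,drv_loopback=1 -- -i
 */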