xref: /dpdk/drivers/net/dpaa2/dpaa2_rxtx.c (revision 53e6597643e47652af29baa24df7566fffbf8b0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <dev_driver.h>
17 #include <rte_hexdump.h>
18 
19 #include <bus_fslmc_driver.h>
20 #include <fslmc_vfio.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_dpio.h>
23 #include <dpaa2_hw_mempool.h>
24 
25 #include "dpaa2_pmd_logs.h"
26 #include "dpaa2_ethdev.h"
27 #include "base/dpaa2_hw_dpni_annot.h"
28 
29 static inline uint32_t __rte_hot
30 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
31 			struct dpaa2_annot_hdr *annotation);
32 
33 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
34 
35 static inline rte_mbuf_timestamp_t *
36 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
37 {
38 	return RTE_MBUF_DYNFIELD(mbuf,
39 		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
40 }
41 
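/*
 * Descriptive note: this macro builds a contiguous (single-buffer) frame
 * descriptor from a single-segment mbuf. The FD address, length, buffer-pool
 * id and data offset are taken from the mbuf, while the FRC, CTRL and FLC
 * words are cleared.
 */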
42 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
43 	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
44 	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
45 	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
46 	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
47 	DPAA2_SET_FD_FRC(_fd, 0);		\
48 	DPAA2_RESET_FD_CTRL(_fd);		\
49 	DPAA2_RESET_FD_FLC(_fd);		\
50 } while (0)
51 
52 static inline void __rte_hot
53 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
54 		       void *hw_annot_addr)
55 {
56 	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
57 	struct dpaa2_annot_hdr *annotation =
58 			(struct dpaa2_annot_hdr *)hw_annot_addr;
59 
60 	m->packet_type = RTE_PTYPE_UNKNOWN;
61 	switch (frc) {
62 	case DPAA2_PKT_TYPE_ETHER:
63 		m->packet_type = RTE_PTYPE_L2_ETHER;
64 		break;
65 	case DPAA2_PKT_TYPE_IPV4:
66 		m->packet_type = RTE_PTYPE_L2_ETHER |
67 			RTE_PTYPE_L3_IPV4;
68 		break;
69 	case DPAA2_PKT_TYPE_IPV6:
70 		m->packet_type = RTE_PTYPE_L2_ETHER |
71 			RTE_PTYPE_L3_IPV6;
72 		break;
73 	case DPAA2_PKT_TYPE_IPV4_EXT:
74 		m->packet_type = RTE_PTYPE_L2_ETHER |
75 			RTE_PTYPE_L3_IPV4_EXT;
76 		break;
77 	case DPAA2_PKT_TYPE_IPV6_EXT:
78 		m->packet_type = RTE_PTYPE_L2_ETHER |
79 			RTE_PTYPE_L3_IPV6_EXT;
80 		break;
81 	case DPAA2_PKT_TYPE_IPV4_TCP:
82 		m->packet_type = RTE_PTYPE_L2_ETHER |
83 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
84 		break;
85 	case DPAA2_PKT_TYPE_IPV6_TCP:
86 		m->packet_type = RTE_PTYPE_L2_ETHER |
87 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
88 		break;
89 	case DPAA2_PKT_TYPE_IPV4_UDP:
90 		m->packet_type = RTE_PTYPE_L2_ETHER |
91 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
92 		break;
93 	case DPAA2_PKT_TYPE_IPV6_UDP:
94 		m->packet_type = RTE_PTYPE_L2_ETHER |
95 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
96 		break;
97 	case DPAA2_PKT_TYPE_IPV4_SCTP:
98 		m->packet_type = RTE_PTYPE_L2_ETHER |
99 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
100 		break;
101 	case DPAA2_PKT_TYPE_IPV6_SCTP:
102 		m->packet_type = RTE_PTYPE_L2_ETHER |
103 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
104 		break;
105 	case DPAA2_PKT_TYPE_IPV4_ICMP:
106 		m->packet_type = RTE_PTYPE_L2_ETHER |
107 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
108 		break;
109 	case DPAA2_PKT_TYPE_IPV6_ICMP:
110 		m->packet_type = RTE_PTYPE_L2_ETHER |
111 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
112 		break;
113 	default:
114 		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
115 	}
116 	m->hash.rss = fd->simple.flc_hi;
117 	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
118 
119 	if (dpaa2_enable_ts[m->port]) {
120 		*dpaa2_timestamp_dynfield(m) = annotation->word2;
121 		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
122 		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
123 				*dpaa2_timestamp_dynfield(m));
124 	}
125 
126 	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
127 		"ol_flags =0x%" PRIx64 "",
128 		frc, m->packet_type, m->ol_flags);
129 }
130 
131 static inline uint32_t __rte_hot
132 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
133 			struct dpaa2_annot_hdr *annotation)
134 {
135 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
136 	uint16_t *vlan_tci;
137 
138 	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
139 			"(4)=0x%" PRIx64 "\t",
140 			annotation->word3, annotation->word4);
141 
142 #if defined(RTE_LIBRTE_IEEE1588)
143 	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
144 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
145 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
146 	}
147 #endif
148 
149 	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
150 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
151 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
152 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
153 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
154 		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
155 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
156 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
157 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
158 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
159 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
160 		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
161 	}
162 
163 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
164 		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
165 		goto parse_done;
166 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
167 		pkt_type |= RTE_PTYPE_L2_ETHER;
168 	} else {
169 		goto parse_done;
170 	}
171 
172 	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
173 				L2_MPLS_N_PRESENT))
174 		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
175 
176 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
177 			     L3_IPV4_N_PRESENT)) {
178 		pkt_type |= RTE_PTYPE_L3_IPV4;
179 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
180 			L3_IP_N_OPT_PRESENT))
181 			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
182 		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
183 					L3_PROTO_ESP_PRESENT))
184 			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
185 
186 	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
187 		  L3_IPV6_N_PRESENT)) {
188 		pkt_type |= RTE_PTYPE_L3_IPV6;
189 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
190 		    L3_IP_N_OPT_PRESENT))
191 			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
192 		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
193 					L3_PROTO_ESP_PRESENT))
194 			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
195 	} else {
196 		goto parse_done;
197 	}
198 
199 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
200 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
201 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
202 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
203 
204 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
205 	    L3_IP_1_MORE_FRAGMENT |
206 	    L3_IP_N_FIRST_FRAGMENT |
207 	    L3_IP_N_MORE_FRAGMENT)) {
208 		pkt_type |= RTE_PTYPE_L4_FRAG;
209 		goto parse_done;
210 	} else {
211 		pkt_type |= RTE_PTYPE_L4_NONFRAG;
212 	}
213 
214 	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
215 		pkt_type |= RTE_PTYPE_L4_UDP;
216 
217 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
218 		pkt_type |= RTE_PTYPE_L4_TCP;
219 
220 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
221 		pkt_type |= RTE_PTYPE_L4_SCTP;
222 
223 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
224 		pkt_type |= RTE_PTYPE_L4_ICMP;
225 
226 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
227 		pkt_type |= RTE_PTYPE_UNKNOWN;
228 
229 parse_done:
230 	return pkt_type;
231 }
232 
233 static inline uint32_t __rte_hot
234 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
235 {
236 	struct dpaa2_annot_hdr *annotation =
237 			(struct dpaa2_annot_hdr *)hw_annot_addr;
238 
239 	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
240 			   annotation->word4);
241 
242 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
243 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
244 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
245 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
246 
247 	if (dpaa2_enable_ts[mbuf->port]) {
248 		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
249 		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
250 		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
251 				*dpaa2_timestamp_dynfield(mbuf));
252 	}
253 
254 	/* Check detailed parsing requirement */
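	/* word3 holds the L2 parse-result bits (VLAN, MPLS, ARP, etc., see
	 * dpaa2_dev_rx_parse_slow()); if any bit of this mask is set, the
	 * frame is not covered by the common word4 patterns below and the
	 * slow parser is used instead.
	 */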
255 	if (annotation->word3 & 0x7FFFFC3FFFF)
256 		return dpaa2_dev_rx_parse_slow(mbuf, annotation);
257 
258 	/* Return some common types from parse processing */
259 	switch (annotation->word4) {
260 	case DPAA2_L3_IPv4:
261 		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
262 	case DPAA2_L3_IPv6:
263 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
264 	case DPAA2_L3_IPv4_TCP:
265 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
266 				RTE_PTYPE_L4_TCP;
267 	case DPAA2_L3_IPv4_UDP:
268 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
269 				RTE_PTYPE_L4_UDP;
270 	case DPAA2_L3_IPv6_TCP:
271 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
272 				RTE_PTYPE_L4_TCP;
273 	case DPAA2_L3_IPv6_UDP:
274 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
275 				RTE_PTYPE_L4_UDP;
276 	default:
277 		break;
278 	}
279 
280 	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
281 }
282 
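/*
 * Convert a scatter-gather frame descriptor into a chained mbuf: the FD
 * points to an SGT whose entries each reference a buffer carrying an inline
 * mbuf header. The per-segment metadata is rebuilt, and the buffer holding
 * the SGT itself is released once the chain has been assembled.
 */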
283 static inline struct rte_mbuf *__rte_hot
284 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
285 		  int port_id)
286 {
287 	struct qbman_sge *sgt, *sge;
288 	size_t sg_addr, fd_addr;
289 	int i = 0;
290 	void *hw_annot_addr;
291 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
292 
293 	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
294 	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
295 
296 	/* Get Scatter gather table address */
297 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
298 
299 	sge = &sgt[i++];
300 	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
301 
302 	/* First Scatter gather entry */
303 	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
304 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
305 	/* Prepare all the metadata for first segment */
306 	first_seg->buf_addr = (uint8_t *)sg_addr;
307 	first_seg->ol_flags = 0;
308 	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
309 	first_seg->data_len = sge->length  & 0x1FFFF;
310 	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
311 	first_seg->nb_segs = 1;
312 	first_seg->next = NULL;
313 	first_seg->port = port_id;
314 	if (dpaa2_svr_family == SVR_LX2160A)
315 		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
316 	else
317 		first_seg->packet_type =
318 			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
319 
320 	rte_mbuf_refcnt_set(first_seg, 1);
321 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
322 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
323 			(void **)&first_seg, 1, 1);
324 #endif
325 	cur_seg = first_seg;
326 	while (!DPAA2_SG_IS_FINAL(sge)) {
327 		sge = &sgt[i++];
328 		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
329 				DPAA2_GET_FLE_ADDR(sge));
330 		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
331 			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
332 		next_seg->buf_addr  = (uint8_t *)sg_addr;
333 		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
334 		next_seg->data_len  = sge->length  & 0x1FFFF;
335 		first_seg->nb_segs += 1;
336 		rte_mbuf_refcnt_set(next_seg, 1);
337 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
338 		rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
339 				(void **)&next_seg, 1, 1);
340 #endif
341 		cur_seg->next = next_seg;
342 		next_seg->next = NULL;
343 		cur_seg = next_seg;
344 	}
345 	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
346 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
347 	rte_mbuf_refcnt_set(temp, 1);
348 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
349 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
350 			(void **)&temp, 1, 1);
351 #endif
352 	rte_pktmbuf_free_seg(temp);
353 
354 	return (void *)first_seg;
355 }
356 
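/*
 * Convert a contiguous frame descriptor into a single-segment mbuf. The mbuf
 * header lives inline in the frame buffer itself, so only the fields that the
 * hardware may have changed are repopulated before parsing.
 */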
357 static inline struct rte_mbuf *__rte_hot
358 eth_fd_to_mbuf(const struct qbman_fd *fd,
359 	       int port_id)
360 {
361 	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
362 	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
363 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
364 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
365 
366 	/* Need to repopulate some of the fields,
367 	 * as they may have changed during the last transmission.
368 	 */
369 	mbuf->nb_segs = 1;
370 	mbuf->ol_flags = 0;
371 	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
372 	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
373 	mbuf->pkt_len = mbuf->data_len;
374 	mbuf->port = port_id;
375 	mbuf->next = NULL;
376 	rte_mbuf_refcnt_set(mbuf, 1);
377 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
378 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
379 			(void **)&mbuf, 1, 1);
380 #endif
381 
382 	/* Parse the packet.
383 	 * Parse results for LX2 are in the FRC field of the FD.
384 	 * For other DPAA2 platforms, parse results are after
385 	 * the private SW annotation area.
386 	 */
387 
388 	if (dpaa2_svr_family == SVR_LX2160A)
389 		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
390 	else
391 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
392 
393 	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
394 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
395 		mbuf, mbuf->buf_addr, mbuf->data_off,
396 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
397 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
398 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
399 
400 	return mbuf;
401 }
402 
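/*
 * Convert a (possibly multi-segment) mbuf chain into a scatter-gather FD.
 * If the first direct mbuf has enough headroom, the SGT is built in place in
 * that buffer; otherwise a buffer is taken from dpaa2_tx_sg_pool to hold it.
 * Segments that must not be freed by hardware (refcnt > 1, external or
 * indirect buffers) get the invalid-BPID bit and are queued in free_buf for
 * software release after the enqueue completes.
 */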
403 static int __rte_noinline __rte_hot
404 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
405 		  struct qbman_fd *fd,
406 		  struct sw_buf_free *free_buf,
407 		  uint32_t *free_count,
408 		  uint32_t pkt_id,
409 		  uint16_t bpid)
410 {
411 	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
412 	struct qbman_sge *sgt, *sge = NULL;
413 	int i, offset = 0;
414 
415 #ifdef RTE_LIBRTE_IEEE1588
416 	/* annotation area for timestamp in first buffer */
417 	offset = 0x64;
418 #endif
419 	if (RTE_MBUF_DIRECT(mbuf) &&
420 		(mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
421 		+ offset))) {
422 		temp = mbuf;
423 		if (rte_mbuf_refcnt_read(temp) > 1) {
424 			/* If refcnt > 1, invalid bpid is set to ensure
425 			 * buffer is not freed by HW
426 			 */
427 			fd->simple.bpid_offset = 0;
428 			DPAA2_SET_FD_IVP(fd);
429 			rte_mbuf_refcnt_update(temp, -1);
430 		} else {
431 			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
432 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
433 			rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
434 					(void **)&temp, 1, 0);
435 #endif
436 		}
437 		DPAA2_SET_FD_OFFSET(fd, offset);
438 	} else {
439 		temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
440 		if (temp == NULL) {
441 			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
442 			return -ENOMEM;
443 		}
444 		DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
445 		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
446 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
447 		rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
448 			(void **)&temp, 1, 0);
449 #endif
450 	}
451 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
452 	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
453 	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
454 	DPAA2_RESET_FD_FRC(fd);
455 	DPAA2_RESET_FD_CTRL(fd);
456 	DPAA2_RESET_FD_FLC(fd);
457 	/* Set the scatter-gather table and scatter-gather entries */
458 	sgt = (struct qbman_sge *)(
459 			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
460 			+ DPAA2_GET_FD_OFFSET(fd));
461 
462 	for (i = 0; i < mbuf->nb_segs; i++) {
463 		sge = &sgt[i];
464 		/* Reset the buffer pool id and offset field */
465 		sge->fin_bpid_offset = 0;
466 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
467 		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
468 		sge->length = cur_seg->data_len;
469 		if (RTE_MBUF_DIRECT(cur_seg)) {
470 			/* If we are using an inline SGT in the same buffer,
471 			 * set the FLE FMT as Frame Data Section.
472 			 */
473 			if (temp == cur_seg) {
474 				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
475 				DPAA2_SET_FLE_IVP(sge);
476 			} else {
477 				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
478 				/* If refcnt > 1, invalid bpid is set to ensure
479 				 * buffer is not freed by HW
480 				 */
481 					DPAA2_SET_FLE_IVP(sge);
482 					rte_mbuf_refcnt_update(cur_seg, -1);
483 				} else {
484 					DPAA2_SET_FLE_BPID(sge,
485 						mempool_to_bpid(cur_seg->pool));
486 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
487 				rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
488 					(void **)&cur_seg, 1, 0);
489 #endif
490 				}
491 			}
492 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
493 			free_buf[*free_count].seg = cur_seg;
494 			free_buf[*free_count].pkt_id = pkt_id;
495 			++*free_count;
496 			DPAA2_SET_FLE_IVP(sge);
497 		} else {
498 			/* Get owner MBUF from indirect buffer */
499 			mi = rte_mbuf_from_indirect(cur_seg);
500 			if (rte_mbuf_refcnt_read(mi) > 1) {
501 				/* If refcnt > 1, invalid bpid is set to ensure
502 				 * owner buffer is not freed by HW
503 				 */
504 				DPAA2_SET_FLE_IVP(sge);
505 			} else {
506 				DPAA2_SET_FLE_BPID(sge,
507 						   mempool_to_bpid(mi->pool));
508 				rte_mbuf_refcnt_update(mi, 1);
509 			}
510 			free_buf[*free_count].seg = cur_seg;
511 			free_buf[*free_count].pkt_id = pkt_id;
512 			++*free_count;
513 		}
514 		cur_seg = cur_seg->next;
515 	}
516 	DPAA2_SG_SET_FINAL(sge, true);
517 	return 0;
518 }
519 
520 static void
521 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
522 	       struct qbman_fd *fd,
523 	       struct sw_buf_free *buf_to_free,
524 	       uint32_t *free_count,
525 	       uint32_t pkt_id,
526 	       uint16_t bpid) __rte_unused;
527 
528 static void __rte_noinline __rte_hot
529 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
530 	       struct qbman_fd *fd,
531 	       struct sw_buf_free *buf_to_free,
532 	       uint32_t *free_count,
533 	       uint32_t pkt_id,
534 	       uint16_t bpid)
535 {
536 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
537 
538 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
539 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
540 		mbuf, mbuf->buf_addr, mbuf->data_off,
541 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
542 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
543 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
544 	if (RTE_MBUF_DIRECT(mbuf)) {
545 		if (rte_mbuf_refcnt_read(mbuf) > 1) {
546 			DPAA2_SET_FD_IVP(fd);
547 			rte_mbuf_refcnt_update(mbuf, -1);
548 		}
549 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
550 		else
551 			rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
552 				(void **)&mbuf, 1, 0);
553 #endif
554 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
555 		buf_to_free[*free_count].seg = mbuf;
556 		buf_to_free[*free_count].pkt_id = pkt_id;
557 		++*free_count;
558 		DPAA2_SET_FD_IVP(fd);
559 	} else {
560 		struct rte_mbuf *mi;
561 
562 		mi = rte_mbuf_from_indirect(mbuf);
563 		if (rte_mbuf_refcnt_read(mi) > 1)
564 			DPAA2_SET_FD_IVP(fd);
565 		else
566 			rte_mbuf_refcnt_update(mi, 1);
567 
568 		buf_to_free[*free_count].seg = mbuf;
569 		buf_to_free[*free_count].pkt_id = pkt_id;
570 		++*free_count;
571 	}
572 }
573 
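/*
 * Copy an mbuf that does not come from a DPAA2-managed pool into a buffer
 * allocated from the hardware buffer pool identified by bpid, then build a
 * contiguous FD around the copy so the hardware can free it after Tx.
 */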
574 static inline int __rte_hot
575 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
576 		    struct qbman_fd *fd, uint16_t bpid)
577 {
578 	struct rte_mbuf *m;
579 	void *mb = NULL;
580 
581 	if (rte_dpaa2_mbuf_alloc_bulk(
582 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
583 		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
584 		return -1;
585 	}
586 	m = (struct rte_mbuf *)mb;
587 	memcpy((char *)m->buf_addr + mbuf->data_off,
588 	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
589 		mbuf->pkt_len);
590 
591 	/* Copy required fields */
592 	m->data_off = mbuf->data_off;
593 	m->ol_flags = mbuf->ol_flags;
594 	m->packet_type = mbuf->packet_type;
595 	m->tx_offload = mbuf->tx_offload;
596 
597 	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
598 
599 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
600 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
601 		(void **)&m, 1, 0);
602 #endif
603 	DPAA2_PMD_DP_DEBUG(
604 		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
605 		" meta: %d, off: %d, len: %d\n",
606 		(void *)mbuf,
607 		mbuf->buf_addr,
608 		DPAA2_GET_FD_ADDR(fd),
609 		DPAA2_GET_FD_BPID(fd),
610 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
611 		DPAA2_GET_FD_OFFSET(fd),
612 		DPAA2_GET_FD_LEN(fd));
613 
614 	return 0;
615 }
616 
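/*
 * Drain the Rx error queue: pull any pending frames, log the frame
 * annotation status (FAS) word for each and hexdump the offending frame.
 * Called from the Rx paths only when dpaa2_enable_err_queue is set.
 */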
617 static void
618 dump_err_pkts(struct dpaa2_queue *dpaa2_q)
619 {
620 	/* Function receives frames for a given device and VQ */
621 	struct qbman_result *dq_storage;
622 	uint32_t fqid = dpaa2_q->fqid;
623 	int ret, num_rx = 0;
624 	uint8_t pending, status;
625 	struct qbman_swp *swp;
626 	const struct qbman_fd *fd;
627 	struct qbman_pull_desc pulldesc;
628 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
629 	uint32_t lcore_id = rte_lcore_id();
630 	void *v_addr, *hw_annot_addr;
631 	struct dpaa2_fas *fas;
632 
633 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
634 		ret = dpaa2_affine_qbman_swp();
635 		if (ret) {
636 			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
637 				rte_gettid());
638 			return;
639 		}
640 	}
641 	swp = DPAA2_PER_LCORE_PORTAL;
642 
643 	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
644 	qbman_pull_desc_clear(&pulldesc);
645 	qbman_pull_desc_set_fq(&pulldesc, fqid);
646 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
647 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
648 	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
649 
650 	while (1) {
651 		if (qbman_swp_pull(swp, &pulldesc)) {
652 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy\n");
653 			/* Portal was busy, try again */
654 			continue;
655 		}
656 		break;
657 	}
658 
659 	/* Check if the previously issued command has completed. */
660 	while (!qbman_check_command_complete(dq_storage))
661 		;
662 
663 	pending = 1;
664 	do {
665 		/* Loop until the dq_storage is updated with
666 		 * new token by QBMAN
667 		 */
668 		while (!qbman_check_new_result(dq_storage))
669 			;
670 
671 		/* Check whether the last pull command has expired and
672 		 * set the condition for loop termination.
673 		 */
674 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
675 			pending = 0;
676 			/* Check for valid frame. */
677 			status = qbman_result_DQ_flags(dq_storage);
678 			if (unlikely((status &
679 				QBMAN_DQ_STAT_VALIDFRAME) == 0))
680 				continue;
681 		}
682 		fd = qbman_result_DQ_fd(dq_storage);
683 		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
684 		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
685 		fas = hw_annot_addr;
686 
687 		DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
688 			" fd_off: %d, fd_err: %x, fas_status: %x",
689 			rte_lcore_id(), eth_data->port_id,
690 			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
691 			fas->status);
692 		rte_hexdump(stderr, "Error packet", v_addr,
693 			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
694 
695 		dq_storage++;
696 		num_rx++;
697 	} while (pending);
698 
699 	dpaa2_q->err_pkts += num_rx;
700 }
701 
702 /* This function assumes that the caller will keep the same value of nb_pkts
703  * across calls for a given queue; if that is not the case, better use the
704  * non-prefetch version of the Rx call.
705  * It will return the packets as requested in the previous call, without
706  * honoring the current nb_pkts or bufs space.
707  */
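/*
 * Two dq_storage buffers are used per queue in a ping-pong fashion: while the
 * results of the previous volatile dequeue are processed from the "active"
 * storage, the next pull command is issued into the other one (selected by
 * q_storage->toggle), which hides the QBMAN pull latency.
 */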
708 uint16_t
709 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
710 {
711 	/* Function receives frames for a given device and VQ */
712 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
713 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
714 	uint32_t fqid = dpaa2_q->fqid;
715 	int ret, num_rx = 0, pull_size;
716 	uint8_t pending, status;
717 	struct qbman_swp *swp;
718 	const struct qbman_fd *fd;
719 	struct qbman_pull_desc pulldesc;
720 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
721 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
722 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
723 
724 	if (unlikely(dpaa2_enable_err_queue))
725 		dump_err_pkts(priv->rx_err_vq);
726 
727 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
728 		ret = dpaa2_affine_qbman_ethrx_swp();
729 		if (ret) {
730 			DPAA2_PMD_ERR("Failure in affining portal");
731 			return 0;
732 		}
733 	}
734 
735 	if (unlikely(!rte_dpaa2_bpid_info &&
736 		     rte_eal_process_type() == RTE_PROC_SECONDARY))
737 		rte_dpaa2_bpid_info = dpaa2_q->bp_array;
738 
739 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
740 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
741 	if (unlikely(!q_storage->active_dqs)) {
742 		q_storage->toggle = 0;
743 		dq_storage = q_storage->dq_storage[q_storage->toggle];
744 		q_storage->last_num_pkts = pull_size;
745 		qbman_pull_desc_clear(&pulldesc);
746 		qbman_pull_desc_set_numframes(&pulldesc,
747 					      q_storage->last_num_pkts);
748 		qbman_pull_desc_set_fq(&pulldesc, fqid);
749 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
750 			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
751 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
752 			while (!qbman_check_command_complete(
753 			       get_swp_active_dqs(
754 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
755 				;
756 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
757 		}
758 		while (1) {
759 			if (qbman_swp_pull(swp, &pulldesc)) {
760 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
761 						  " QBMAN is busy (1)\n");
762 				/* Portal was busy, try again */
763 				continue;
764 			}
765 			break;
766 		}
767 		q_storage->active_dqs = dq_storage;
768 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
769 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
770 				   dq_storage);
771 	}
772 
773 	dq_storage = q_storage->active_dqs;
774 	rte_prefetch0((void *)(size_t)(dq_storage));
775 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
776 
777 	/* Prepare the next pull descriptor. This gives room for the
778 	 * prefetching done on the DQRR entries.
779 	 */
780 	q_storage->toggle ^= 1;
781 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
782 	qbman_pull_desc_clear(&pulldesc);
783 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
784 	qbman_pull_desc_set_fq(&pulldesc, fqid);
785 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
786 		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
787 
788 	/* Check if the previously issued command has completed.
789 	 * The SWP also seems to be shared between the Ethernet driver
790 	 * and the SEC driver.
791 	 */
792 	while (!qbman_check_command_complete(dq_storage))
793 		;
794 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
795 		clear_swp_active_dqs(q_storage->active_dpio_id);
796 
797 	pending = 1;
798 
799 	do {
800 		/* Loop until the dq_storage is updated with
801 		 * new token by QBMAN
802 		 */
803 		while (!qbman_check_new_result(dq_storage))
804 			;
805 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
806 		/* Check whether the last pull command has expired and
807 		 * set the condition for loop termination.
808 		 */
809 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
810 			pending = 0;
811 			/* Check for valid frame. */
812 			status = qbman_result_DQ_flags(dq_storage);
813 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
814 				continue;
815 		}
816 		fd = qbman_result_DQ_fd(dq_storage);
817 
818 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
819 		if (dpaa2_svr_family != SVR_LX2160A) {
820 			const struct qbman_fd *next_fd =
821 				qbman_result_DQ_fd(dq_storage + 1);
822 			/* Prefetch Annotation address for the parse results */
823 			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
824 				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
825 		}
826 #endif
827 
828 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
829 			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
830 		else
831 			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
832 #if defined(RTE_LIBRTE_IEEE1588)
833 		if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
834 			priv->rx_timestamp =
835 				*dpaa2_timestamp_dynfield(bufs[num_rx]);
836 		}
837 #endif
838 
839 		if (eth_data->dev_conf.rxmode.offloads &
840 				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
841 			rte_vlan_strip(bufs[num_rx]);
842 
843 		dq_storage++;
844 		num_rx++;
845 	} while (pending);
846 
847 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
848 		while (!qbman_check_command_complete(
849 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
850 			;
851 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
852 	}
853 	/* issue a volatile dequeue command for next pull */
854 	while (1) {
855 		if (qbman_swp_pull(swp, &pulldesc)) {
856 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
857 					  " QBMAN is busy (2)\n");
858 			continue;
859 		}
860 		break;
861 	}
862 	q_storage->active_dqs = dq_storage1;
863 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
864 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
865 
866 	dpaa2_q->rx_pkts += num_rx;
867 
868 	return num_rx;
869 }
870 
871 void __rte_hot
872 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
873 				 const struct qbman_fd *fd,
874 				 const struct qbman_result *dq,
875 				 struct dpaa2_queue *rxq,
876 				 struct rte_event *ev)
877 {
878 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
879 		DPAA2_FD_PTA_SIZE + 16));
880 
881 	ev->flow_id = rxq->ev.flow_id;
882 	ev->sub_event_type = rxq->ev.sub_event_type;
883 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
884 	ev->op = RTE_EVENT_OP_NEW;
885 	ev->sched_type = rxq->ev.sched_type;
886 	ev->queue_id = rxq->ev.queue_id;
887 	ev->priority = rxq->ev.priority;
888 
889 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
890 
891 	qbman_swp_dqrr_consume(swp, dq);
892 }
893 
894 void __rte_hot
895 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
896 			       const struct qbman_fd *fd,
897 			       const struct qbman_result *dq,
898 			       struct dpaa2_queue *rxq,
899 			       struct rte_event *ev)
900 {
901 	uint8_t dqrr_index;
902 
903 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
904 		DPAA2_FD_PTA_SIZE + 16));
905 
906 	ev->flow_id = rxq->ev.flow_id;
907 	ev->sub_event_type = rxq->ev.sub_event_type;
908 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
909 	ev->op = RTE_EVENT_OP_NEW;
910 	ev->sched_type = rxq->ev.sched_type;
911 	ev->queue_id = rxq->ev.queue_id;
912 	ev->priority = rxq->ev.priority;
913 
914 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
915 
916 	dqrr_index = qbman_get_dqrr_idx(dq);
917 	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
918 	DPAA2_PER_LCORE_DQRR_SIZE++;
919 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
920 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
921 }
922 
923 void __rte_hot
924 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
925 				const struct qbman_fd *fd,
926 				const struct qbman_result *dq,
927 				struct dpaa2_queue *rxq,
928 				struct rte_event *ev)
929 {
930 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
931 		DPAA2_FD_PTA_SIZE + 16));
932 
933 	ev->flow_id = rxq->ev.flow_id;
934 	ev->sub_event_type = rxq->ev.sub_event_type;
935 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
936 	ev->op = RTE_EVENT_OP_NEW;
937 	ev->sched_type = rxq->ev.sched_type;
938 	ev->queue_id = rxq->ev.queue_id;
939 	ev->priority = rxq->ev.priority;
940 
941 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
942 
943 	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
944 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
945 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
946 
947 	qbman_swp_dqrr_consume(swp, dq);
948 }
949 
950 uint16_t
951 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
952 {
953 	/* Function receives frames for a given device and VQ */
954 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
955 	struct qbman_result *dq_storage;
956 	uint32_t fqid = dpaa2_q->fqid;
957 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
958 	uint8_t pending, status;
959 	struct qbman_swp *swp;
960 	const struct qbman_fd *fd;
961 	struct qbman_pull_desc pulldesc;
962 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
963 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
964 
965 	if (unlikely(dpaa2_enable_err_queue))
966 		dump_err_pkts(priv->rx_err_vq);
967 
968 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
969 		ret = dpaa2_affine_qbman_swp();
970 		if (ret) {
971 			DPAA2_PMD_ERR(
972 				"Failed to allocate IO portal, tid: %d\n",
973 				rte_gettid());
974 			return 0;
975 		}
976 	}
977 	swp = DPAA2_PER_LCORE_PORTAL;
978 
979 	do {
980 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
981 		qbman_pull_desc_clear(&pulldesc);
982 		qbman_pull_desc_set_fq(&pulldesc, fqid);
983 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
984 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
985 
986 		if (next_pull > dpaa2_dqrr_size) {
987 			qbman_pull_desc_set_numframes(&pulldesc,
988 				dpaa2_dqrr_size);
989 			next_pull -= dpaa2_dqrr_size;
990 		} else {
991 			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
992 			next_pull = 0;
993 		}
994 
995 		while (1) {
996 			if (qbman_swp_pull(swp, &pulldesc)) {
997 				DPAA2_PMD_DP_DEBUG(
998 					"VDQ command is not issued. QBMAN is busy\n");
999 				/* Portal was busy, try again */
1000 				continue;
1001 			}
1002 			break;
1003 		}
1004 
1005 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1006 		/* Check if the previously issued command has completed. */
1007 		while (!qbman_check_command_complete(dq_storage))
1008 			;
1009 
1010 		num_pulled = 0;
1011 		pending = 1;
1012 		do {
1013 			/* Loop until the dq_storage is updated with
1014 			 * new token by QBMAN
1015 			 */
1016 			while (!qbman_check_new_result(dq_storage))
1017 				;
1018 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1019 			/* Check whether the last pull command has expired and
1020 			 * set the condition for loop termination.
1021 			 */
1022 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1023 				pending = 0;
1024 				/* Check for valid frame. */
1025 				status = qbman_result_DQ_flags(dq_storage);
1026 				if (unlikely((status &
1027 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
1028 					continue;
1029 			}
1030 			fd = qbman_result_DQ_fd(dq_storage);
1031 
1032 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
1033 			if (dpaa2_svr_family != SVR_LX2160A) {
1034 				const struct qbman_fd *next_fd =
1035 					qbman_result_DQ_fd(dq_storage + 1);
1036 
1037 				/* Prefetch Annotation address for the parse
1038 				 * results.
1039 				 */
1040 				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
1041 					DPAA2_GET_FD_ADDR(next_fd) +
1042 					DPAA2_FD_PTA_SIZE + 16)));
1043 			}
1044 #endif
1045 
1046 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
1047 				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
1048 							eth_data->port_id);
1049 			else
1050 				bufs[num_rx] = eth_fd_to_mbuf(fd,
1051 							eth_data->port_id);
1052 
1053 #if defined(RTE_LIBRTE_IEEE1588)
1054 			if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
1055 				priv->rx_timestamp =
1056 					*dpaa2_timestamp_dynfield(bufs[num_rx]);
1057 			}
1058 #endif
1059 
1060 			if (eth_data->dev_conf.rxmode.offloads &
1061 					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1062 				rte_vlan_strip(bufs[num_rx]);
1063 			}
1064 
1065 			dq_storage++;
1066 			num_rx++;
1067 			num_pulled++;
1068 		} while (pending);
1069 	/* Last VDQ provided all packets and more packets are requested */
1070 	} while (next_pull && num_pulled == dpaa2_dqrr_size);
1071 
1072 	dpaa2_q->rx_pkts += num_rx;
1073 
1074 	return num_rx;
1075 }
1076 
1077 uint16_t dpaa2_dev_tx_conf(void *queue)
1078 {
1079 	/* Function pulls Tx confirmation frames for a given device and VQ */
1080 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1081 	struct qbman_result *dq_storage;
1082 	uint32_t fqid = dpaa2_q->fqid;
1083 	int ret, num_tx_conf = 0, num_pulled;
1084 	uint8_t pending, status;
1085 	struct qbman_swp *swp;
1086 	const struct qbman_fd *fd, *next_fd;
1087 	struct qbman_pull_desc pulldesc;
1088 	struct qbman_release_desc releasedesc;
1089 	uint32_t bpid;
1090 	uint64_t buf;
1091 #if defined(RTE_LIBRTE_IEEE1588)
1092 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1093 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1094 	struct dpaa2_annot_hdr *annotation;
1095 	void *v_addr;
1096 	struct rte_mbuf *mbuf;
1097 #endif
1098 
1099 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1100 		ret = dpaa2_affine_qbman_swp();
1101 		if (ret) {
1102 			DPAA2_PMD_ERR(
1103 				"Failed to allocate IO portal, tid: %d\n",
1104 				rte_gettid());
1105 			return 0;
1106 		}
1107 	}
1108 	swp = DPAA2_PER_LCORE_PORTAL;
1109 
1110 	do {
1111 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
1112 		qbman_pull_desc_clear(&pulldesc);
1113 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1114 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1115 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1116 
1117 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
1118 
1119 		while (1) {
1120 			if (qbman_swp_pull(swp, &pulldesc)) {
1121 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1122 						   " QBMAN is busy\n");
1123 				/* Portal was busy, try again */
1124 				continue;
1125 			}
1126 			break;
1127 		}
1128 
1129 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1130 		/* Check if the previously issued command has completed. */
1131 		while (!qbman_check_command_complete(dq_storage))
1132 			;
1133 
1134 		num_pulled = 0;
1135 		pending = 1;
1136 		do {
1137 			/* Loop until the dq_storage is updated with
1138 			 * new token by QBMAN
1139 			 */
1140 			while (!qbman_check_new_result(dq_storage))
1141 				;
1142 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1143 			/* Check whether the last pull command has expired and
1144 			 * set the condition for loop termination.
1145 			 */
1146 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1147 				pending = 0;
1148 				/* Check for valid frame. */
1149 				status = qbman_result_DQ_flags(dq_storage);
1150 				if (unlikely((status &
1151 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
1152 					continue;
1153 			}
1154 			fd = qbman_result_DQ_fd(dq_storage);
1155 
1156 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
1157 			/* Prefetch Annotation address for the parse results */
1158 			rte_prefetch0((void *)(size_t)
1159 				(DPAA2_GET_FD_ADDR(next_fd) +
1160 				 DPAA2_FD_PTA_SIZE + 16));
1161 
1162 			bpid = DPAA2_GET_FD_BPID(fd);
1163 
1164 			/* Create a release descriptor required for releasing
1165 			 * buffers into QBMAN
1166 			 */
1167 			qbman_release_desc_clear(&releasedesc);
1168 			qbman_release_desc_set_bpid(&releasedesc, bpid);
1169 
1170 			buf = DPAA2_GET_FD_ADDR(fd);
1171 			/* Release the buffer back to BMAN */
1172 			do {
1173 				ret = qbman_swp_release(swp, &releasedesc,
1174 							&buf, 1);
1175 			} while (ret == -EBUSY);
1176 
1177 			dq_storage++;
1178 			num_tx_conf++;
1179 			num_pulled++;
1180 #if defined(RTE_LIBRTE_IEEE1588)
1181 			v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1182 			mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
1183 				rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1184 
1185 			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1186 				annotation = (struct dpaa2_annot_hdr *)((size_t)
1187 					DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1188 					DPAA2_FD_PTA_SIZE);
1189 				priv->tx_timestamp = annotation->word2;
1190 			}
1191 #endif
1192 		} while (pending);
1193 
1194 	/* Last VDQ provided all packets and more packets are requested */
1195 	} while (num_pulled == dpaa2_dqrr_size);
1196 
1197 	dpaa2_q->rx_pkts += num_tx_conf;
1198 
1199 	return num_tx_conf;
1200 }
1201 
1202 /* Configure the egress frame annotation for timestamp update */
1203 static void enable_tx_tstamp(struct qbman_fd *fd)
1204 {
1205 	struct dpaa2_faead *fd_faead;
1206 
1207 	/* Set frame annotation status field as valid */
1208 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1209 
1210 	/* Set frame annotation egress action descriptor as valid */
1211 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1212 
1213 	/* Set Annotation Length as 128B */
1214 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1215 
1216 	/* enable update of confirmation frame annotation */
1217 	fd_faead = (struct dpaa2_faead *)((size_t)
1218 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1219 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1220 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1221 				DPAA2_ANNOT_FAEAD_UPD;
1222 }
1223 
1224 /*
1225  * Callback to handle sending packets through WRIOP based interface
1226  */
1227 uint16_t
1228 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1229 {
1230 	/* Function to transmit the frames to the given device and VQ */
1231 	uint32_t loop, retry_count;
1232 	int32_t ret;
1233 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1234 	struct rte_mbuf *mi;
1235 	uint32_t frames_to_send;
1236 	struct rte_mempool *mp;
1237 	struct qbman_eq_desc eqdesc;
1238 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1239 	struct qbman_swp *swp;
1240 	uint16_t num_tx = 0;
1241 	uint16_t bpid;
1242 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1243 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1244 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1245 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1246 	uint32_t free_count = 0;
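	/* buf_to_free collects segments (external/indirect buffers) that the
	 * hardware must not release; they are freed in software below, but
	 * only for packets that were actually enqueued (pkt_id < num_tx).
	 */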
1247 
1248 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1249 		ret = dpaa2_affine_qbman_swp();
1250 		if (ret) {
1251 			DPAA2_PMD_ERR(
1252 				"Failed to allocate IO portal, tid: %d\n",
1253 				rte_gettid());
1254 			return 0;
1255 		}
1256 	}
1257 	swp = DPAA2_PER_LCORE_PORTAL;
1258 
1259 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1260 			eth_data, dpaa2_q->fqid);
1261 
1262 #ifdef RTE_LIBRTE_IEEE1588
1263 	/* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1264 	 * corresponding to the last transmitted packet, for reading
1265 	 * the timestamp.
1266 	 */
1267 	if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1268 		priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1269 		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1270 		priv->tx_timestamp = 0;
1271 	}
1272 #endif
1273 
1274 	/* Prepare enqueue descriptor */
1275 	qbman_eq_desc_clear(&eqdesc);
1276 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1277 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1278 
1279 	/* Clear the unused FD fields before sending */
1280 	while (nb_pkts) {
1281 		/* Check if the queue is congested */
1282 		retry_count = 0;
1283 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1284 			retry_count++;
1285 			/* Retry for some time before giving up */
1286 			if (retry_count > CONG_RETRY_COUNT)
1287 				goto skip_tx;
1288 		}
1289 
1290 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1291 			dpaa2_eqcr_size : nb_pkts;
1292 
1293 		for (loop = 0; loop < frames_to_send; loop++) {
1294 			if (*dpaa2_seqn(*bufs)) {
1295 				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1296 
1297 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1298 						dqrr_index;
1299 				DPAA2_PER_LCORE_DQRR_SIZE--;
1300 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1301 				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1302 			}
1303 
1304 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1305 				mp = (*bufs)->pool;
1306 				/* Check the basic scenario and set
1307 				 * the FD appropriately here itself.
1308 				 */
1309 				if (likely(mp && mp->ops_index ==
1310 				    priv->bp_list->dpaa2_ops_index &&
1311 				    (*bufs)->nb_segs == 1 &&
1312 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1313 					if (unlikely(((*bufs)->ol_flags
1314 						& RTE_MBUF_F_TX_VLAN) ||
1315 						(eth_data->dev_conf.txmode.offloads
1316 						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1317 						ret = rte_vlan_insert(bufs);
1318 						if (ret)
1319 							goto send_n_return;
1320 					}
1321 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1322 					&fd_arr[loop], mempool_to_bpid(mp));
1323 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1324 					rte_mempool_check_cookies
1325 						(rte_mempool_from_obj((void *)*bufs),
1326 						(void **)bufs, 1, 0);
1327 #endif
1328 					bufs++;
1329 #ifdef RTE_LIBRTE_IEEE1588
1330 					enable_tx_tstamp(&fd_arr[loop]);
1331 #endif
1332 					continue;
1333 				}
1334 			} else {
1335 				mi = rte_mbuf_from_indirect(*bufs);
1336 				mp = mi->pool;
1337 			}
1338 
1339 			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1340 				if (unlikely((*bufs)->nb_segs > 1)) {
1341 					mp = (*bufs)->pool;
1342 					if (eth_mbuf_to_sg_fd(*bufs,
1343 							      &fd_arr[loop],
1344 							      buf_to_free,
1345 							      &free_count,
1346 							      loop,
1347 							      mempool_to_bpid(mp)))
1348 						goto send_n_return;
1349 				} else {
1350 					eth_mbuf_to_fd(*bufs,
1351 							&fd_arr[loop],
1352 							buf_to_free,
1353 							&free_count,
1354 							loop, 0);
1355 				}
1356 				bufs++;
1357 #ifdef RTE_LIBRTE_IEEE1588
1358 				enable_tx_tstamp(&fd_arr[loop]);
1359 #endif
1360 				continue;
1361 			}
1362 
1363 			/* Not a hw_pkt pool allocated frame */
1364 			if (unlikely(!mp || !priv->bp_list)) {
1365 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1366 				goto send_n_return;
1367 			}
1368 
1369 			if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
1370 				(eth_data->dev_conf.txmode.offloads
1371 				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1372 				int ret = rte_vlan_insert(bufs);
1373 				if (ret)
1374 					goto send_n_return;
1375 			}
1376 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1377 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1378 				/* alloc should be from the default buffer pool
1379 				 * attached to this interface
1380 				 */
1381 				bpid = priv->bp_list->buf_pool.bpid;
1382 
1383 				if (unlikely((*bufs)->nb_segs > 1)) {
1384 					DPAA2_PMD_ERR("S/G support not added"
1385 						" for non hw offload buffer");
1386 					goto send_n_return;
1387 				}
1388 				if (eth_copy_mbuf_to_fd(*bufs,
1389 							&fd_arr[loop], bpid)) {
1390 					goto send_n_return;
1391 				}
1392 				/* free the original packet */
1393 				rte_pktmbuf_free(*bufs);
1394 			} else {
1395 				bpid = mempool_to_bpid(mp);
1396 				if (unlikely((*bufs)->nb_segs > 1)) {
1397 					if (eth_mbuf_to_sg_fd(*bufs,
1398 							&fd_arr[loop],
1399 							buf_to_free,
1400 							&free_count,
1401 							loop,
1402 							bpid))
1403 						goto send_n_return;
1404 				} else {
1405 					eth_mbuf_to_fd(*bufs,
1406 							&fd_arr[loop],
1407 							buf_to_free,
1408 							&free_count,
1409 							loop, bpid);
1410 				}
1411 			}
1412 #ifdef RTE_LIBRTE_IEEE1588
1413 			enable_tx_tstamp(&fd_arr[loop]);
1414 #endif
1415 			bufs++;
1416 		}
1417 
1418 		loop = 0;
1419 		retry_count = 0;
1420 		while (loop < frames_to_send) {
1421 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1422 					&fd_arr[loop], &flags[loop],
1423 					frames_to_send - loop);
1424 			if (unlikely(ret < 0)) {
1425 				retry_count++;
1426 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1427 					num_tx += loop;
1428 					nb_pkts -= loop;
1429 					goto send_n_return;
1430 				}
1431 			} else {
1432 				loop += ret;
1433 				retry_count = 0;
1434 			}
1435 		}
1436 
1437 		num_tx += loop;
1438 		nb_pkts -= loop;
1439 	}
1440 	dpaa2_q->tx_pkts += num_tx;
1441 
1442 	for (loop = 0; loop < free_count; loop++) {
1443 		if (buf_to_free[loop].pkt_id < num_tx)
1444 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1445 	}
1446 
1447 	return num_tx;
1448 
1449 send_n_return:
1450 	/* Send any already-prepared FDs */
1451 	if (loop) {
1452 		unsigned int i = 0;
1453 
1454 		retry_count = 0;
1455 		while (i < loop) {
1456 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1457 							 &fd_arr[i],
1458 							 &flags[i],
1459 							 loop - i);
1460 			if (unlikely(ret < 0)) {
1461 				retry_count++;
1462 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1463 					break;
1464 			} else {
1465 				i += ret;
1466 				retry_count = 0;
1467 			}
1468 		}
1469 		num_tx += i;
1470 	}
1471 skip_tx:
1472 	dpaa2_q->tx_pkts += num_tx;
1473 
1474 	for (loop = 0; loop < free_count; loop++) {
1475 		if (buf_to_free[loop].pkt_id < num_tx)
1476 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1477 	}
1478 
1479 	return num_tx;
1480 }
1481 
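/*
 * Release the mbuf referenced by an enqueue-response entry: rebuild the mbuf
 * from the FD stored in the per-lcore DPIO eqresp ring and free it.
 */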
1482 void
1483 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
1484 			  __rte_unused struct dpaa2_queue *dpaa2_q)
1485 {
1486 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1487 	struct qbman_fd *fd;
1488 	struct rte_mbuf *m;
1489 
1490 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1491 
1492 	/* Setting the port id does not matter as we are going to free the mbuf */
1493 	m = eth_fd_to_mbuf(fd, 0);
1494 	rte_pktmbuf_free(m);
1495 }
1496 
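/*
 * Build the enqueue descriptor for an ordered or atomic frame. For ORP
 * frames the original ODP id and sequence number (stashed in dpaa2_seqn at
 * dequeue time) are restored; in strict-order mode an enqueue response is
 * additionally requested and tracked through the per-lcore eqresp ring.
 * For atomic frames the held DQRR entry is consumed via DCA.
 */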
1497 static void
1498 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1499 			     struct rte_mbuf *m,
1500 			     struct qbman_eq_desc *eqdesc)
1501 {
1502 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1503 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1504 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1505 	struct eqresp_metadata *eqresp_meta;
1506 	uint16_t orpid, seqnum;
1507 	uint8_t dq_idx;
1508 
1509 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1510 
1511 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1512 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1513 			DPAA2_EQCR_OPRID_SHIFT;
1514 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1515 			DPAA2_EQCR_SEQNUM_SHIFT;
1516 
1517 		if (!priv->en_loose_ordered) {
1518 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1519 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1520 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1521 				dpio_dev->eqresp_pi]), 1);
1522 			qbman_eq_desc_set_token(eqdesc, 1);
1523 
1524 			eqresp_meta = &dpio_dev->eqresp_meta[
1525 				dpio_dev->eqresp_pi];
1526 			eqresp_meta->dpaa2_q = dpaa2_q;
1527 			eqresp_meta->mp = m->pool;
1528 
1529 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1530 				dpio_dev->eqresp_pi++ :
1531 				(dpio_dev->eqresp_pi = 0);
1532 		} else {
1533 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1534 		}
1535 	} else {
1536 		dq_idx = *dpaa2_seqn(m) - 1;
1537 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1538 		DPAA2_PER_LCORE_DQRR_SIZE--;
1539 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1540 	}
1541 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1542 }
1543 
1544 uint16_t
1545 dpaa2_dev_tx_multi_txq_ordered(void **queue,
1546 		struct rte_mbuf **bufs, uint16_t nb_pkts)
1547 {
1548 	/* Function to transmit each frame to its respective queue. */
1549 	uint32_t loop, i, retry_count;
1550 	int32_t ret;
1551 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1552 	uint32_t frames_to_send, num_free_eq_desc = 0;
1553 	struct rte_mempool *mp;
1554 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1555 	struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
1556 	struct qbman_swp *swp;
1557 	uint16_t bpid;
1558 	struct rte_mbuf *mi;
1559 	struct rte_eth_dev_data *eth_data;
1560 	struct dpaa2_dev_priv *priv;
1561 	struct dpaa2_queue *order_sendq;
1562 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1563 	uint32_t free_count = 0;
1564 
1565 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1566 		ret = dpaa2_affine_qbman_swp();
1567 		if (ret) {
1568 			DPAA2_PMD_ERR(
1569 				"Failed to allocate IO portal, tid: %d\n",
1570 				rte_gettid());
1571 			return 0;
1572 		}
1573 	}
1574 	swp = DPAA2_PER_LCORE_PORTAL;
1575 
1576 	frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1577 		dpaa2_eqcr_size : nb_pkts;
1578 
1579 	for (loop = 0; loop < frames_to_send; loop++) {
1580 		dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
1581 		eth_data = dpaa2_q[loop]->eth_data;
1582 		priv = eth_data->dev_private;
1583 		if (!priv->en_loose_ordered) {
1584 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1585 				if (!num_free_eq_desc) {
1586 					num_free_eq_desc = dpaa2_free_eq_descriptors();
1587 					if (!num_free_eq_desc)
1588 						goto send_frames;
1589 				}
1590 				num_free_eq_desc--;
1591 			}
1592 		}
1593 
1594 		DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1595 				   eth_data, dpaa2_q[loop]->fqid);
1596 
1597 		/* Check if the queue is congested */
1598 		retry_count = 0;
1599 		while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
1600 			retry_count++;
1601 			/* Retry for some time before giving up */
1602 			if (retry_count > CONG_RETRY_COUNT)
1603 				goto send_frames;
1604 		}
1605 
1606 		/* Prepare enqueue descriptor */
1607 		qbman_eq_desc_clear(&eqdesc[loop]);
1608 
1609 		if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
1610 			order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1611 			dpaa2_set_enqueue_descriptor(order_sendq,
1612 						     (*bufs),
1613 						     &eqdesc[loop]);
1614 		} else {
1615 			qbman_eq_desc_set_no_orp(&eqdesc[loop],
1616 							 DPAA2_EQ_RESP_ERR_FQ);
1617 			qbman_eq_desc_set_fq(&eqdesc[loop],
1618 						     dpaa2_q[loop]->fqid);
1619 		}
1620 
1621 		if (likely(RTE_MBUF_DIRECT(*bufs))) {
1622 			mp = (*bufs)->pool;
1623 			/* Check the basic scenario and set
1624 			 * the FD appropriately here itself.
1625 			 */
1626 			if (likely(mp && mp->ops_index ==
1627 				priv->bp_list->dpaa2_ops_index &&
1628 				(*bufs)->nb_segs == 1 &&
1629 				rte_mbuf_refcnt_read((*bufs)) == 1)) {
1630 				if (unlikely((*bufs)->ol_flags
1631 					& RTE_MBUF_F_TX_VLAN)) {
1632 					ret = rte_vlan_insert(bufs);
1633 					if (ret)
1634 						goto send_frames;
1635 				}
1636 				DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1637 					&fd_arr[loop],
1638 					mempool_to_bpid(mp));
1639 				bufs++;
1640 				continue;
1641 			}
1642 		} else {
1643 			mi = rte_mbuf_from_indirect(*bufs);
1644 			mp = mi->pool;
1645 		}
1646 		/* Not a hw_pkt pool allocated frame */
1647 		if (unlikely(!mp || !priv->bp_list)) {
1648 			DPAA2_PMD_ERR("Err: No buffer pool attached");
1649 			goto send_frames;
1650 		}
1651 
1652 		if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1653 			DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1654 			/* alloc should be from the default buffer pool
1655 			 * attached to this interface
1656 			 */
1657 			bpid = priv->bp_list->buf_pool.bpid;
1658 
1659 			if (unlikely((*bufs)->nb_segs > 1)) {
1660 				DPAA2_PMD_ERR(
1661 					"S/G not supp for non hw offload buffer");
1662 				goto send_frames;
1663 			}
1664 			if (eth_copy_mbuf_to_fd(*bufs,
1665 						&fd_arr[loop], bpid)) {
1666 				goto send_frames;
1667 			}
1668 			/* free the original packet */
1669 			rte_pktmbuf_free(*bufs);
1670 		} else {
1671 			bpid = mempool_to_bpid(mp);
1672 			if (unlikely((*bufs)->nb_segs > 1)) {
1673 				if (eth_mbuf_to_sg_fd(*bufs,
1674 						      &fd_arr[loop],
1675 						      buf_to_free,
1676 						      &free_count,
1677 						      loop,
1678 						      bpid))
1679 					goto send_frames;
1680 			} else {
1681 				eth_mbuf_to_fd(*bufs,
1682 						&fd_arr[loop],
1683 						buf_to_free,
1684 						&free_count,
1685 						loop, bpid);
1686 			}
1687 		}
1688 
1689 		bufs++;
1690 	}
1691 
1692 send_frames:
1693 	frames_to_send = loop;
1694 	loop = 0;
1695 	retry_count = 0;
1696 	while (loop < frames_to_send) {
1697 		ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
1698 				&fd_arr[loop],
1699 				frames_to_send - loop);
1700 		if (likely(ret > 0)) {
1701 			loop += ret;
1702 			retry_count = 0;
1703 		} else {
1704 			retry_count++;
1705 			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1706 				break;
1707 		}
1708 	}
1709 
1710 	for (i = 0; i < free_count; i++) {
1711 		if (buf_to_free[i].pkt_id < loop)
1712 			rte_pktmbuf_free_seg(buf_to_free[i].seg);
1713 	}
1714 	return loop;
1715 }
1716 
1717 /* Callback to handle sending ordered packets through WRIOP based interface */
1718 uint16_t
1719 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1720 {
1721 	/* Function to transmit the frames to the given device and VQ */
1722 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1723 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1724 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1725 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1726 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1727 	struct rte_mbuf *mi;
1728 	struct rte_mempool *mp;
1729 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1730 	struct qbman_swp *swp;
1731 	uint32_t frames_to_send, num_free_eq_desc;
1732 	uint32_t loop, retry_count;
1733 	int32_t ret;
1734 	uint16_t num_tx = 0;
1735 	uint16_t bpid;
1736 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1737 	uint32_t free_count = 0;
1738 
1739 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1740 		ret = dpaa2_affine_qbman_swp();
1741 		if (ret) {
1742 			DPAA2_PMD_ERR(
1743 				"Failed to allocate IO portal, tid: %d",
1744 				rte_gettid());
1745 			return 0;
1746 		}
1747 	}
1748 	swp = DPAA2_PER_LCORE_PORTAL;
1749 
1750 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1751 			   eth_data, dpaa2_q->fqid);
1752 
1753 	/* This path also handles normal and atomic queues, since any type
1754 	 * of packet can be enqueued while ordered queues are in use.
1755 	 */
1756 	while (nb_pkts) {
1757 		/* Check if the queue is congested */
1758 		retry_count = 0;
1759 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1760 			retry_count++;
1761 			/* Retry for some time before giving up */
1762 			if (retry_count > CONG_RETRY_COUNT)
1763 				goto skip_tx;
1764 		}
1765 
1766 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1767 			dpaa2_eqcr_size : nb_pkts;
1768 
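		/* With strict (non-loose) ordering, enqueues that carry ORP
		 * (order restoration) information need a free enqueue-response
		 * descriptor, so cap the burst to the number currently
		 * available.
		 */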
1769 		if (!priv->en_loose_ordered) {
1770 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1771 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1772 				if (num_free_eq_desc < frames_to_send)
1773 					frames_to_send = num_free_eq_desc;
1774 			}
1775 		}
1776 
1777 		for (loop = 0; loop < frames_to_send; loop++) {
1778 			/* Prepare enqueue descriptor */
1779 			qbman_eq_desc_clear(&eqdesc[loop]);
1780 
1781 			if (*dpaa2_seqn(*bufs)) {
1782 				/* Use only queue 0 for Tx of atomic/ordered
1783 				 * packets, as they could otherwise be reordered
1784 				 * when transmitted out of the interface.
1785 				 */
1786 				dpaa2_set_enqueue_descriptor(order_sendq,
1787 							     (*bufs),
1788 							     &eqdesc[loop]);
1789 			} else {
1790 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1791 							 DPAA2_EQ_RESP_ERR_FQ);
1792 				qbman_eq_desc_set_fq(&eqdesc[loop],
1793 						     dpaa2_q->fqid);
1794 			}
1795 
1796 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1797 				mp = (*bufs)->pool;
1798 				/* Fast path: a direct, single-segment mbuf with refcnt 1
1799 				 * from the DPAA2 pool can be mapped to a contiguous FD.
1800 				 */
1801 				if (likely(mp && mp->ops_index ==
1802 				    priv->bp_list->dpaa2_ops_index &&
1803 				    (*bufs)->nb_segs == 1 &&
1804 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1805 					if (unlikely((*bufs)->ol_flags
1806 						& RTE_MBUF_F_TX_VLAN)) {
1807 						ret = rte_vlan_insert(bufs);
1808 						if (ret)
1809 							goto send_n_return;
1810 					}
1811 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1812 						&fd_arr[loop],
1813 						mempool_to_bpid(mp));
1814 					bufs++;
1815 					continue;
1816 				}
1817 			} else {
1818 				mi = rte_mbuf_from_indirect(*bufs);
1819 				mp = mi->pool;
1820 			}
1821 			/* Frame not allocated from a hardware (DPAA2) packet pool */
1822 			if (unlikely(!mp || !priv->bp_list)) {
1823 				DPAA2_PMD_ERR("No buffer pool attached to the interface");
1824 				goto send_n_return;
1825 			}
1826 
1827 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1828 				DPAA2_PMD_WARN("Non-DPAA2 buffer pool");
1829 				/* Copy into a buffer allocated from the default
1830 				 * buffer pool attached to this interface.
1831 				 */
1832 				bpid = priv->bp_list->buf_pool.bpid;
1833 
1834 				if (unlikely((*bufs)->nb_segs > 1)) {
1835 					DPAA2_PMD_ERR(
1836 						"S/G not supported for non-DPAA2 buffers");
1837 					goto send_n_return;
1838 				}
1839 				if (eth_copy_mbuf_to_fd(*bufs,
1840 							&fd_arr[loop], bpid)) {
1841 					goto send_n_return;
1842 				}
1843 				/* free the original packet */
1844 				rte_pktmbuf_free(*bufs);
1845 			} else {
1846 				bpid = mempool_to_bpid(mp);
1847 				if (unlikely((*bufs)->nb_segs > 1)) {
1848 					if (eth_mbuf_to_sg_fd(*bufs,
1849 							      &fd_arr[loop],
1850 							      buf_to_free,
1851 							      &free_count,
1852 							      loop,
1853 							      bpid))
1854 						goto send_n_return;
1855 				} else {
1856 					eth_mbuf_to_fd(*bufs,
1857 							&fd_arr[loop],
1858 							buf_to_free,
1859 							&free_count,
1860 							loop, bpid);
1861 				}
1862 			}
1863 			bufs++;
1864 		}
1865 
1866 		loop = 0;
1867 		retry_count = 0;
1868 		while (loop < frames_to_send) {
1869 			ret = qbman_swp_enqueue_multiple_desc(swp,
1870 					&eqdesc[loop], &fd_arr[loop],
1871 					frames_to_send - loop);
1872 			if (unlikely(ret < 0)) {
1873 				retry_count++;
1874 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1875 					num_tx += loop;
1876 					nb_pkts -= loop;
1877 					goto send_n_return;
1878 				}
1879 			} else {
1880 				loop += ret;
1881 				retry_count = 0;
1882 			}
1883 		}
1884 
1885 		num_tx += loop;
1886 		nb_pkts -= loop;
1887 	}
1888 	dpaa2_q->tx_pkts += num_tx;
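	/* Free the software-owned segments recorded while building the FDs,
	 * but only for frames that were actually transmitted
	 * (pkt_id < num_tx).
	 */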
1889 	for (loop = 0; loop < free_count; loop++) {
1890 		if (buf_to_free[loop].pkt_id < num_tx)
1891 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1892 	}
1893 
1894 	return num_tx;
1895 
1896 send_n_return:
1897 	/* Flush any FDs already prepared in this burst */
1898 	if (loop) {
1899 		unsigned int i = 0;
1900 
1901 		retry_count = 0;
1902 		while (i < loop) {
1903 			ret = qbman_swp_enqueue_multiple_desc(swp,
1904 				       &eqdesc[i], &fd_arr[i], loop - i);
1905 			if (unlikely(ret < 0)) {
1906 				retry_count++;
1907 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1908 					break;
1909 			} else {
1910 				i += ret;
1911 				retry_count = 0;
1912 			}
1913 		}
1914 		num_tx += i;
1915 	}
1916 skip_tx:
1917 	dpaa2_q->tx_pkts += num_tx;
1918 	for (loop = 0; loop < free_count; loop++) {
1919 		if (buf_to_free[loop].pkt_id < num_tx)
1920 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1921 	}
1922 
1923 	return num_tx;
1924 }
1925 
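/*
 * qbman_result_DQ_fd() returns a pointer to const, while the loopback path
 * below re-enqueues the very same FDs, so the const qualifier is cast away;
 * the cast-qual warning is therefore suppressed for this block.
 */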
1926 #if defined(RTE_TOOLCHAIN_GCC)
1927 #pragma GCC diagnostic push
1928 #pragma GCC diagnostic ignored "-Wcast-qual"
1929 #elif defined(RTE_TOOLCHAIN_CLANG)
1930 #pragma clang diagnostic push
1931 #pragma clang diagnostic ignored "-Wcast-qual"
1932 #endif
1933 
1934 /* This function loops back all received packets. */
1935 uint16_t
1936 dpaa2_dev_loopback_rx(void *queue,
1937 		      struct rte_mbuf **bufs __rte_unused,
1938 		      uint16_t nb_pkts)
1939 {
1940 	/* Function to receive frames for a given device and VQ */
1941 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1942 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1943 	uint32_t fqid = dpaa2_q->fqid;
1944 	int ret, num_rx = 0, num_tx = 0, pull_size;
1945 	uint8_t pending, status;
1946 	struct qbman_swp *swp;
1947 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1948 	struct qbman_pull_desc pulldesc;
1949 	struct qbman_eq_desc eqdesc;
1950 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1951 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1952 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1953 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1954 	/* TODO: currently only the first Tx queue is used for loopback */
1955 
1956 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1957 		ret = dpaa2_affine_qbman_ethrx_swp();
1958 		if (ret) {
1959 			DPAA2_PMD_ERR("Failed to affine portal");
1960 			return 0;
1961 		}
1962 	}
1963 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1964 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
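	/* First call on this queue: no volatile dequeue is in flight yet, so
	 * issue an initial pull command before consuming results below.
	 */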
1965 	if (unlikely(!q_storage->active_dqs)) {
1966 		q_storage->toggle = 0;
1967 		dq_storage = q_storage->dq_storage[q_storage->toggle];
1968 		q_storage->last_num_pkts = pull_size;
1969 		qbman_pull_desc_clear(&pulldesc);
1970 		qbman_pull_desc_set_numframes(&pulldesc,
1971 					      q_storage->last_num_pkts);
1972 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1973 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1974 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1975 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1976 			while (!qbman_check_command_complete(
1977 			       get_swp_active_dqs(
1978 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1979 				;
1980 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1981 		}
1982 		while (1) {
1983 			if (qbman_swp_pull(swp, &pulldesc)) {
1984 				DPAA2_PMD_DP_DEBUG(
1985 					"VDQ command not issued. QBMAN busy\n");
1986 				/* Portal was busy, try again */
1987 				continue;
1988 			}
1989 			break;
1990 		}
1991 		q_storage->active_dqs = dq_storage;
1992 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1993 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1994 				   dq_storage);
1995 	}
1996 
1997 	dq_storage = q_storage->active_dqs;
1998 	rte_prefetch0((void *)(size_t)(dq_storage));
1999 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
2000 
2001 	/* Prepare the next pull descriptor. This leaves room for the
2002 	 * prefetching done on the DQRR entries.
2003 	 */
2004 	q_storage->toggle ^= 1;
2005 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
2006 	qbman_pull_desc_clear(&pulldesc);
2007 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
2008 	qbman_pull_desc_set_fq(&pulldesc, fqid);
2009 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
2010 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
2011 
2012 	/* Prepare enqueue descriptor */
2013 	qbman_eq_desc_clear(&eqdesc);
2014 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
2015 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
2016 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
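	/* Every frame dequeued below is re-enqueued unmodified to the first
	 * Tx queue using this single enqueue descriptor.
	 */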
2017 
2018 	/* Check if the previously issued command has completed.
2019 	 * Note that the SWP appears to be shared between the Ethernet
2020 	 * driver and the SEC driver.
2021 	 */
2022 	while (!qbman_check_command_complete(dq_storage))
2023 		;
2024 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
2025 		clear_swp_active_dqs(q_storage->active_dpio_id);
2026 
2027 	pending = 1;
2028 
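	/* Walk the dequeue results written to dq_storage, collecting the FD
	 * pointers until the pull is reported complete.
	 */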
2029 	do {
2030 		/* Loop until dq_storage is updated with a
2031 		 * new token by QBMAN.
2032 		 */
2033 		while (!qbman_check_new_result(dq_storage))
2034 			;
2035 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
2036 		/* Check whether the last pull command has completed and,
2037 		 * if so, set the condition for loop termination.
2038 		 */
2039 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
2040 			pending = 0;
2041 			/* Check for valid frame. */
2042 			status = qbman_result_DQ_flags(dq_storage);
2043 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
2044 				continue;
2045 		}
2046 		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
2047 
2048 		dq_storage++;
2049 		num_rx++;
2050 	} while (pending);
2051 
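	/* Re-enqueue all collected FDs to the Tx queue; the enqueue may push
	 * only part of the batch, so loop until every frame is sent.
	 */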
2052 	while (num_tx < num_rx) {
2053 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
2054 				&fd[num_tx], 0, num_rx - num_tx);
2055 	}
2056 
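	/* Wait for any outstanding volatile dequeue on this portal to finish
	 * before issuing the next pull command.
	 */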
2057 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
2058 		while (!qbman_check_command_complete(
2059 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
2060 			;
2061 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
2062 	}
2063 	/* issue a volatile dequeue command for next pull */
2064 	while (1) {
2065 		if (qbman_swp_pull(swp, &pulldesc)) {
2066 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
2067 					  "QBMAN is busy (2)\n");
2068 			continue;
2069 		}
2070 		break;
2071 	}
2072 	q_storage->active_dqs = dq_storage1;
2073 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2074 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
2075 
2076 	dpaa2_q->rx_pkts += num_rx;
2077 	dpaa2_q->tx_pkts += num_tx;
2078 
2079 	return 0;
2080 }
2081 #if defined(RTE_TOOLCHAIN_GCC)
2082 #pragma GCC diagnostic pop
2083 #elif defined(RTE_TOOLCHAIN_CLANG)
2084 #pragma clang diagnostic pop
2085 #endif
2086