xref: /dpdk/drivers/net/dpaa2/dpaa2_rxtx.c (revision 12dc2539f7b12b2ec4570197c1e8a16a973d71f6)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2024 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <dev_driver.h>
17 #include <rte_hexdump.h>
18 
19 #include <bus_fslmc_driver.h>
20 #include <fslmc_vfio.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_dpio.h>
23 #include <dpaa2_hw_mempool.h>
24 
25 #include "dpaa2_pmd_logs.h"
26 #include "dpaa2_ethdev.h"
27 #include "base/dpaa2_hw_dpni_annot.h"
28 
29 static inline uint32_t __rte_hot
30 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
31 			struct dpaa2_annot_hdr *annotation);
32 
33 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
34 
35 static inline rte_mbuf_timestamp_t *
36 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
37 {
38 	return RTE_MBUF_DYNFIELD(mbuf,
39 		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
40 }
41 
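/* Populate a contiguous (single-buffer) frame descriptor from an mbuf:
 * set the buffer IOVA, length, bpid and data offset, and clear the
 * FRC, CTRL and FLC fields.
 */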
42 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
43 	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
44 	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
45 	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
46 	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
47 	DPAA2_SET_FD_FRC(_fd, 0);		\
48 	DPAA2_RESET_FD_CTRL(_fd);		\
49 	DPAA2_RESET_FD_FLC(_fd);		\
50 } while (0)
51 
52 static inline void __rte_hot
53 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
54 		       void *hw_annot_addr)
55 {
56 	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
57 	struct dpaa2_annot_hdr *annotation =
58 			(struct dpaa2_annot_hdr *)hw_annot_addr;
59 
60 	m->packet_type = RTE_PTYPE_UNKNOWN;
61 	switch (frc) {
62 	case DPAA2_PKT_TYPE_ETHER:
63 		m->packet_type = RTE_PTYPE_L2_ETHER;
64 		break;
65 	case DPAA2_PKT_TYPE_IPV4:
66 		m->packet_type = RTE_PTYPE_L2_ETHER |
67 			RTE_PTYPE_L3_IPV4;
68 		break;
69 	case DPAA2_PKT_TYPE_IPV6:
70 		m->packet_type = RTE_PTYPE_L2_ETHER |
71 			RTE_PTYPE_L3_IPV6;
72 		break;
73 	case DPAA2_PKT_TYPE_IPV4_EXT:
74 		m->packet_type = RTE_PTYPE_L2_ETHER |
75 			RTE_PTYPE_L3_IPV4_EXT;
76 		break;
77 	case DPAA2_PKT_TYPE_IPV6_EXT:
78 		m->packet_type = RTE_PTYPE_L2_ETHER |
79 			RTE_PTYPE_L3_IPV6_EXT;
80 		break;
81 	case DPAA2_PKT_TYPE_IPV4_TCP:
82 		m->packet_type = RTE_PTYPE_L2_ETHER |
83 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
84 		break;
85 	case DPAA2_PKT_TYPE_IPV6_TCP:
86 		m->packet_type = RTE_PTYPE_L2_ETHER |
87 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
88 		break;
89 	case DPAA2_PKT_TYPE_IPV4_UDP:
90 		m->packet_type = RTE_PTYPE_L2_ETHER |
91 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
92 		break;
93 	case DPAA2_PKT_TYPE_IPV6_UDP:
94 		m->packet_type = RTE_PTYPE_L2_ETHER |
95 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
96 		break;
97 	case DPAA2_PKT_TYPE_IPV4_SCTP:
98 		m->packet_type = RTE_PTYPE_L2_ETHER |
99 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
100 		break;
101 	case DPAA2_PKT_TYPE_IPV6_SCTP:
102 		m->packet_type = RTE_PTYPE_L2_ETHER |
103 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
104 		break;
105 	case DPAA2_PKT_TYPE_IPV4_ICMP:
106 		m->packet_type = RTE_PTYPE_L2_ETHER |
107 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
108 		break;
109 	case DPAA2_PKT_TYPE_IPV6_ICMP:
110 		m->packet_type = RTE_PTYPE_L2_ETHER |
111 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
112 		break;
113 	default:
114 		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
115 	}
116 	m->hash.rss = fd->simple.flc_hi;
117 	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
118 
119 	if (dpaa2_enable_ts[m->port]) {
120 		*dpaa2_timestamp_dynfield(m) = annotation->word2;
121 		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
122 		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
123 				*dpaa2_timestamp_dynfield(m));
124 	}
125 
126 	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
127 		"ol_flags =0x%" PRIx64 "",
128 		frc, m->packet_type, m->ol_flags);
129 }
130 
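/* Slow-path Rx parse: walk the annotation words to derive the packet type,
 * VLAN TCI, checksum status and fragmentation info when the fast path
 * cannot classify the frame on its own.
 */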
131 static inline uint32_t __rte_hot
132 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
133 			struct dpaa2_annot_hdr *annotation)
134 {
135 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
136 	uint16_t *vlan_tci;
137 
138 	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
139 			"(4)=0x%" PRIx64 "\t",
140 			annotation->word3, annotation->word4);
141 
142 #if defined(RTE_LIBRTE_IEEE1588)
143 	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
144 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
145 		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
146 	}
147 #endif
148 
149 	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
150 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
151 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
152 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
153 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
154 		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
155 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
156 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
157 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
158 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
159 		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
160 		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
161 	}
162 
163 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
164 		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
165 		goto parse_done;
166 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
167 		pkt_type |= RTE_PTYPE_L2_ETHER;
168 	} else {
169 		goto parse_done;
170 	}
171 
172 	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
173 				L2_MPLS_N_PRESENT))
174 		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
175 
176 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
177 			     L3_IPV4_N_PRESENT)) {
178 		pkt_type |= RTE_PTYPE_L3_IPV4;
179 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
180 			L3_IP_N_OPT_PRESENT))
181 			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
182 		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
183 					L3_PROTO_ESP_PRESENT))
184 			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
185 
186 	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
187 		  L3_IPV6_N_PRESENT)) {
188 		pkt_type |= RTE_PTYPE_L3_IPV6;
189 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
190 		    L3_IP_N_OPT_PRESENT))
191 			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
192 		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
193 					L3_PROTO_ESP_PRESENT))
194 			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
195 	} else {
196 		goto parse_done;
197 	}
198 
199 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
200 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
201 	else
202 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
203 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
204 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
205 	else
206 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
207 
208 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
209 	    L3_IP_1_MORE_FRAGMENT |
210 	    L3_IP_N_FIRST_FRAGMENT |
211 	    L3_IP_N_MORE_FRAGMENT)) {
212 		pkt_type |= RTE_PTYPE_L4_FRAG;
213 		goto parse_done;
214 	} else {
215 		pkt_type |= RTE_PTYPE_L4_NONFRAG;
216 	}
217 
218 	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
219 		pkt_type |= RTE_PTYPE_L4_UDP;
220 
221 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
222 		pkt_type |= RTE_PTYPE_L4_TCP;
223 
224 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
225 		pkt_type |= RTE_PTYPE_L4_SCTP;
226 
227 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
228 		pkt_type |= RTE_PTYPE_L4_ICMP;
229 
230 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
231 		pkt_type |= RTE_PTYPE_UNKNOWN;
232 
233 parse_done:
234 	return pkt_type;
235 }
236 
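/* Fast-path Rx parse: set the checksum and timestamp offload flags from the
 * annotation, return the packet type directly for the common L3/L4 cases in
 * word4, and fall back to the slow parse otherwise.
 */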
237 static inline uint32_t __rte_hot
238 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
239 {
240 	struct dpaa2_annot_hdr *annotation =
241 			(struct dpaa2_annot_hdr *)hw_annot_addr;
242 
243 	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
244 			   annotation->word4);
245 
246 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
247 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
248 	else
249 		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
250 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
251 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
252 	else
253 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
254 
255 	if (dpaa2_enable_ts[mbuf->port]) {
256 		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
257 		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
258 		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
259 				*dpaa2_timestamp_dynfield(mbuf));
260 	}
261 
262 	/* Check detailed parsing requirement */
263 	if (annotation->word3 & 0x7FFFFC3FFFF)
264 		return dpaa2_dev_rx_parse_slow(mbuf, annotation);
265 
266 	/* Return some common types from parse processing */
267 	switch (annotation->word4) {
268 	case DPAA2_L3_IPv4:
269 		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
270 	case DPAA2_L3_IPv6:
271 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
272 	case DPAA2_L3_IPv4_TCP:
273 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
274 				RTE_PTYPE_L4_TCP;
275 	case DPAA2_L3_IPv4_UDP:
276 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
277 				RTE_PTYPE_L4_UDP;
278 	case DPAA2_L3_IPv6_TCP:
279 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
280 				RTE_PTYPE_L4_TCP;
281 	case DPAA2_L3_IPv6_UDP:
282 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
283 				RTE_PTYPE_L4_UDP;
284 	default:
285 		break;
286 	}
287 
288 	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
289 }
290 
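/* Convert a scatter/gather frame descriptor into a chained mbuf: the first
 * SGE becomes the head segment (with parse results applied), the remaining
 * SGEs are linked as further segments, and the buffer holding the SG table
 * itself is freed back to its pool.
 */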
291 static inline struct rte_mbuf *__rte_hot
292 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
293 		  int port_id)
294 {
295 	struct qbman_sge *sgt, *sge;
296 	size_t sg_addr, fd_addr;
297 	int i = 0;
298 	void *hw_annot_addr;
299 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
300 
301 	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
302 	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
303 
304 	/* Get Scatter gather table address */
305 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
306 
307 	sge = &sgt[i++];
308 	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
309 
310 	/* First Scatter gather entry */
311 	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
312 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
313 	/* Prepare all the metadata for first segment */
314 	first_seg->buf_addr = (uint8_t *)sg_addr;
315 	first_seg->ol_flags = 0;
316 	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
317 	first_seg->data_len = sge->length  & 0x1FFFF;
318 	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
319 	first_seg->nb_segs = 1;
320 	first_seg->next = NULL;
321 	first_seg->port = port_id;
322 	if (dpaa2_svr_family == SVR_LX2160A)
323 		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
324 	else
325 		first_seg->packet_type =
326 			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
327 
328 	rte_mbuf_refcnt_set(first_seg, 1);
329 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
330 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
331 			(void **)&first_seg, 1, 1);
332 #endif
333 	cur_seg = first_seg;
334 	while (!DPAA2_SG_IS_FINAL(sge)) {
335 		sge = &sgt[i++];
336 		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
337 				DPAA2_GET_FLE_ADDR(sge));
338 		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
339 			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
340 		next_seg->buf_addr  = (uint8_t *)sg_addr;
341 		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
342 		next_seg->data_len  = sge->length  & 0x1FFFF;
343 		first_seg->nb_segs += 1;
344 		rte_mbuf_refcnt_set(next_seg, 1);
345 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
346 		rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
347 				(void **)&next_seg, 1, 1);
348 #endif
349 		cur_seg->next = next_seg;
350 		next_seg->next = NULL;
351 		cur_seg = next_seg;
352 	}
353 	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
354 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
355 	rte_mbuf_refcnt_set(temp, 1);
356 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
357 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
358 			(void **)&temp, 1, 1);
359 #endif
360 	rte_pktmbuf_free_seg(temp);
361 
362 	return (void *)first_seg;
363 }
364 
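/* Convert a contiguous frame descriptor into a single-segment mbuf that is
 * inlined in the same hardware buffer, then run the Rx parse on it.
 */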
365 static inline struct rte_mbuf *__rte_hot
366 eth_fd_to_mbuf(const struct qbman_fd *fd,
367 	       int port_id)
368 {
369 	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
370 	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
371 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
372 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
373 
374 	/* Need to repopulate some of the fields,
375 	 * as they may have changed during the last transmission.
376 	 */
377 	mbuf->nb_segs = 1;
378 	mbuf->ol_flags = 0;
379 	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
380 	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
381 	mbuf->pkt_len = mbuf->data_len;
382 	mbuf->port = port_id;
383 	mbuf->next = NULL;
384 	rte_mbuf_refcnt_set(mbuf, 1);
385 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
386 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
387 			(void **)&mbuf, 1, 1);
388 #endif
389 
390 	/* Parse the packet */
391 	/* Parse results for LX2 are in the FRC field of the FD.
392 	 * For other DPAA2 platforms, the parse results follow
393 	 * the private SW annotation area.
394 	 */
395 
396 	if (dpaa2_svr_family == SVR_LX2160A)
397 		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
398 	else
399 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
400 
401 	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
402 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
403 		mbuf, mbuf->buf_addr, mbuf->data_off,
404 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
405 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
406 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
407 
408 	return mbuf;
409 }
410 
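/* Build a scatter/gather frame descriptor from a (possibly multi-segment)
 * mbuf. The SG table lives either in the headroom of the first direct
 * segment, when there is enough room, or in a buffer taken from
 * dpaa2_tx_sg_pool; external and indirect segments are recorded in free_buf
 * so software can release them after transmission.
 */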
411 static int __rte_noinline __rte_hot
412 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
413 		  struct qbman_fd *fd,
414 		  struct sw_buf_free *free_buf,
415 		  uint32_t *free_count,
416 		  uint32_t pkt_id,
417 		  uint16_t bpid)
418 {
419 	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
420 	struct qbman_sge *sgt, *sge = NULL;
421 	int i, offset = 0;
422 
423 #ifdef RTE_LIBRTE_IEEE1588
424 	/* annotation area for timestamp in first buffer */
425 	offset = 0x64;
426 #endif
427 	if (RTE_MBUF_DIRECT(mbuf) &&
428 		(mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
429 		+ offset))) {
430 		temp = mbuf;
431 		if (rte_mbuf_refcnt_read(temp) > 1) {
432 			/* If refcnt > 1, invalid bpid is set to ensure
433 			 * buffer is not freed by HW
434 			 */
435 			fd->simple.bpid_offset = 0;
436 			DPAA2_SET_FD_IVP(fd);
437 			rte_mbuf_refcnt_update(temp, -1);
438 		} else {
439 			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
440 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
441 			rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
442 					(void **)&temp, 1, 0);
443 #endif
444 		}
445 		DPAA2_SET_FD_OFFSET(fd, offset);
446 	} else {
447 		temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
448 		if (temp == NULL) {
449 			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table");
450 			return -ENOMEM;
451 		}
452 		DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
453 		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
454 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
455 		rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
456 			(void **)&temp, 1, 0);
457 #endif
458 	}
459 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
460 	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
461 	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
462 	DPAA2_RESET_FD_FRC(fd);
463 	DPAA2_RESET_FD_CTRL(fd);
464 	DPAA2_RESET_FD_FLC(fd);
465 	/*Set Scatter gather table and Scatter gather entries*/
466 	sgt = (struct qbman_sge *)(
467 			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
468 			+ DPAA2_GET_FD_OFFSET(fd));
469 
470 	for (i = 0; i < mbuf->nb_segs; i++) {
471 		sge = &sgt[i];
472 		/*Resetting the buffer pool id and offset field*/
473 		sge->fin_bpid_offset = 0;
474 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(cur_seg));
475 		sge->length = cur_seg->data_len;
476 		if (RTE_MBUF_DIRECT(cur_seg)) {
477 			/* if we are using inline SGT in same buffers
478 			/* If we are using an inline SGT in the same buffer,
479 			 * set the FLE FMT as Frame Data Section.
480 			if (temp == cur_seg) {
481 				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
482 				DPAA2_SET_FLE_IVP(sge);
483 			} else {
484 				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
485 				/* If refcnt > 1, invalid bpid is set to ensure
486 				 * buffer is not freed by HW
487 				 */
488 					DPAA2_SET_FLE_IVP(sge);
489 					rte_mbuf_refcnt_update(cur_seg, -1);
490 				} else {
491 					DPAA2_SET_FLE_BPID(sge,
492 						mempool_to_bpid(cur_seg->pool));
493 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
494 				rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
495 					(void **)&cur_seg, 1, 0);
496 #endif
497 				}
498 			}
499 		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
500 			free_buf[*free_count].seg = cur_seg;
501 			free_buf[*free_count].pkt_id = pkt_id;
502 			++*free_count;
503 			DPAA2_SET_FLE_IVP(sge);
504 		} else {
505 			/* Get owner MBUF from indirect buffer */
506 			mi = rte_mbuf_from_indirect(cur_seg);
507 			if (rte_mbuf_refcnt_read(mi) > 1) {
508 				/* If refcnt > 1, invalid bpid is set to ensure
509 				 * owner buffer is not freed by HW
510 				 */
511 				DPAA2_SET_FLE_IVP(sge);
512 			} else {
513 				DPAA2_SET_FLE_BPID(sge,
514 						   mempool_to_bpid(mi->pool));
515 				rte_mbuf_refcnt_update(mi, 1);
516 			}
517 			free_buf[*free_count].seg = cur_seg;
518 			free_buf[*free_count].pkt_id = pkt_id;
519 			++*free_count;
520 		}
521 		cur_seg = cur_seg->next;
522 	}
523 	DPAA2_SG_SET_FINAL(sge, true);
524 	return 0;
525 }
526 
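/* Build a contiguous frame descriptor from a single-segment mbuf and decide
 * who frees the buffer: hardware for exclusively owned direct buffers,
 * software (via buf_to_free) for external and indirect buffers; direct
 * buffers still referenced elsewhere are left to their owner.
 */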
527 static void
528 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
529 	       struct qbman_fd *fd,
530 	       struct sw_buf_free *buf_to_free,
531 	       uint32_t *free_count,
532 	       uint32_t pkt_id,
533 	       uint16_t bpid) __rte_unused;
534 
535 static void __rte_noinline __rte_hot
536 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
537 	       struct qbman_fd *fd,
538 	       struct sw_buf_free *buf_to_free,
539 	       uint32_t *free_count,
540 	       uint32_t pkt_id,
541 	       uint16_t bpid)
542 {
543 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
544 
545 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
546 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
547 		mbuf, mbuf->buf_addr, mbuf->data_off,
548 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
549 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
550 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
551 	if (RTE_MBUF_DIRECT(mbuf)) {
552 		if (rte_mbuf_refcnt_read(mbuf) > 1) {
553 			DPAA2_SET_FD_IVP(fd);
554 			rte_mbuf_refcnt_update(mbuf, -1);
555 		}
556 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
557 		else
558 			rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
559 				(void **)&mbuf, 1, 0);
560 #endif
561 	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
562 		buf_to_free[*free_count].seg = mbuf;
563 		buf_to_free[*free_count].pkt_id = pkt_id;
564 		++*free_count;
565 		DPAA2_SET_FD_IVP(fd);
566 	} else {
567 		struct rte_mbuf *mi;
568 
569 		mi = rte_mbuf_from_indirect(mbuf);
570 		if (rte_mbuf_refcnt_read(mi) > 1)
571 			DPAA2_SET_FD_IVP(fd);
572 		else
573 			rte_mbuf_refcnt_update(mi, 1);
574 
575 		buf_to_free[*free_count].seg = mbuf;
576 		buf_to_free[*free_count].pkt_id = pkt_id;
577 		++*free_count;
578 	}
579 }
580 
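/* Copy the payload of an mbuf from a non-DPAA2 pool into a freshly
 * allocated hardware-pool buffer and build a contiguous FD from the copy,
 * so the hardware can transmit and release the buffer itself.
 */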
581 static inline int __rte_hot
582 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
583 		    struct qbman_fd *fd, uint16_t bpid)
584 {
585 	struct rte_mbuf *m;
586 	void *mb = NULL;
587 
588 	if (rte_dpaa2_mbuf_alloc_bulk(
589 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
590 		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer");
591 		return -1;
592 	}
593 	m = (struct rte_mbuf *)mb;
594 	memcpy((char *)m->buf_addr + mbuf->data_off,
595 	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
596 		mbuf->pkt_len);
597 
598 	/* Copy required fields */
599 	m->data_off = mbuf->data_off;
600 	m->ol_flags = mbuf->ol_flags;
601 	m->packet_type = mbuf->packet_type;
602 	m->tx_offload = mbuf->tx_offload;
603 
604 	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
605 
606 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
607 	rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
608 		(void **)&m, 1, 0);
609 #endif
610 	DPAA2_PMD_DP_DEBUG(
611 		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
612 		" meta: %d, off: %d, len: %d",
613 		(void *)mbuf,
614 		mbuf->buf_addr,
615 		DPAA2_GET_FD_ADDR(fd),
616 		DPAA2_GET_FD_BPID(fd),
617 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
618 		DPAA2_GET_FD_OFFSET(fd),
619 		DPAA2_GET_FD_LEN(fd));
620 
621 	return 0;
622 }
623 
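/* Drain the Rx error queue: pull frames from the error FQ, log the frame
 * annotation status, hexdump each faulty packet and account it in err_pkts.
 * Used only when dpaa2_enable_err_queue is set.
 */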
624 static void
625 dump_err_pkts(struct dpaa2_queue *dpaa2_q)
626 {
627 	/* Function to receive frames for a given device and VQ */
628 	struct qbman_result *dq_storage;
629 	uint32_t fqid = dpaa2_q->fqid;
630 	int ret, num_rx = 0;
631 	uint8_t pending, status;
632 	struct qbman_swp *swp;
633 	const struct qbman_fd *fd;
634 	struct qbman_pull_desc pulldesc;
635 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
636 	uint32_t lcore_id = rte_lcore_id();
637 	void *v_addr, *hw_annot_addr;
638 	struct dpaa2_fas *fas;
639 
640 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
641 		ret = dpaa2_affine_qbman_swp();
642 		if (ret) {
643 			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d",
644 				rte_gettid());
645 			return;
646 		}
647 	}
648 	swp = DPAA2_PER_LCORE_PORTAL;
649 
650 	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
651 	qbman_pull_desc_clear(&pulldesc);
652 	qbman_pull_desc_set_fq(&pulldesc, fqid);
653 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
654 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
655 	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
656 
657 	while (1) {
658 		if (qbman_swp_pull(swp, &pulldesc)) {
659 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy");
660 			/* Portal was busy, try again */
661 			continue;
662 		}
663 		break;
664 	}
665 
666 	/* Check if the previously issued command has completed. */
667 	while (!qbman_check_command_complete(dq_storage))
668 		;
669 
670 	pending = 1;
671 	do {
672 		/* Loop until the dq_storage is updated with
673 		 * new token by QBMAN
674 		 */
675 		while (!qbman_check_new_result(dq_storage))
676 			;
677 
678 		/* Check whether the last pull command has expired and
679 		 * set the condition for loop termination.
680 		 */
681 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
682 			pending = 0;
683 			/* Check for valid frame. */
684 			status = qbman_result_DQ_flags(dq_storage);
685 			if (unlikely((status &
686 				QBMAN_DQ_STAT_VALIDFRAME) == 0))
687 				continue;
688 		}
689 		fd = qbman_result_DQ_fd(dq_storage);
690 		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
691 		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
692 		fas = hw_annot_addr;
693 
694 		DPAA2_PMD_ERR("[%d] error packet on port[%d]:"
695 			" fd_off: %d, fd_err: %x, fas_status: %x",
696 			rte_lcore_id(), eth_data->port_id,
697 			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
698 			fas->status);
699 		rte_hexdump(stderr, "Error packet", v_addr,
700 			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
701 
702 		dq_storage++;
703 		num_rx++;
704 	} while (pending);
705 
706 	dpaa2_q->err_pkts += num_rx;
707 }
708 
709 /* This function assumes that the caller keeps the same value of nb_pkts
710  * across calls for a given queue; if that is not the case, it is better to
711  * use the non-prefetch version of the Rx call.
712  * It returns the packets as requested in the previous call, without
713  * honoring the current nb_pkts or the bufs space.
714  */
715 uint16_t
716 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
717 {
718 	/* Function to receive frames for a given device and VQ */
719 	struct dpaa2_queue *dpaa2_q = queue;
720 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
721 	uint32_t fqid = dpaa2_q->fqid;
722 	int ret, num_rx = 0, pull_size;
723 	uint8_t pending, status;
724 	struct qbman_swp *swp;
725 	const struct qbman_fd *fd;
726 	struct qbman_pull_desc pulldesc;
727 	struct queue_storage_info_t *q_storage;
728 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
729 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
730 
731 	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
732 
733 	if (unlikely(dpaa2_enable_err_queue))
734 		dump_err_pkts(priv->rx_err_vq);
735 
736 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
737 		ret = dpaa2_affine_qbman_ethrx_swp();
738 		if (ret) {
739 			DPAA2_PMD_ERR("Failure in affining portal");
740 			return 0;
741 		}
742 	}
743 
744 	if (unlikely(!rte_dpaa2_bpid_info &&
745 		     rte_eal_process_type() == RTE_PROC_SECONDARY))
746 		rte_dpaa2_bpid_info = dpaa2_q->bp_array;
747 
748 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
749 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
750 	if (unlikely(!q_storage->active_dqs)) {
751 		q_storage->toggle = 0;
752 		dq_storage = q_storage->dq_storage[q_storage->toggle];
753 		q_storage->last_num_pkts = pull_size;
754 		qbman_pull_desc_clear(&pulldesc);
755 		qbman_pull_desc_set_numframes(&pulldesc,
756 					      q_storage->last_num_pkts);
757 		qbman_pull_desc_set_fq(&pulldesc, fqid);
758 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
759 			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
760 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
761 			while (!qbman_check_command_complete(
762 			       get_swp_active_dqs(
763 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
764 				;
765 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
766 		}
767 		while (1) {
768 			if (qbman_swp_pull(swp, &pulldesc)) {
769 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
770 						  " QBMAN is busy (1)");
771 				/* Portal was busy, try again */
772 				continue;
773 			}
774 			break;
775 		}
776 		q_storage->active_dqs = dq_storage;
777 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
778 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
779 				   dq_storage);
780 	}
781 
782 	dq_storage = q_storage->active_dqs;
783 	rte_prefetch0((void *)(size_t)(dq_storage));
784 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
785 
786 	/* Prepare next pull descriptor. This will give space for the
787 	 * prefetching done on DQRR entries
788 	 */
789 	q_storage->toggle ^= 1;
790 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
791 	qbman_pull_desc_clear(&pulldesc);
792 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
793 	qbman_pull_desc_set_fq(&pulldesc, fqid);
794 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
795 		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
796 
797 	/* Check if the previously issued command has completed.
798 	 * The SWP also appears to be shared between the Ethernet driver
799 	 * and the SEC driver.
800 	 */
801 	while (!qbman_check_command_complete(dq_storage))
802 		;
803 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
804 		clear_swp_active_dqs(q_storage->active_dpio_id);
805 
806 	pending = 1;
807 
808 	do {
809 		/* Loop until the dq_storage is updated with
810 		 * new token by QBMAN
811 		 */
812 		while (!qbman_check_new_result(dq_storage))
813 			;
814 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
815 		/* Check whether the last pull command has expired and
816 		 * set the condition for loop termination.
817 		 */
818 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
819 			pending = 0;
820 			/* Check for valid frame. */
821 			status = qbman_result_DQ_flags(dq_storage);
822 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
823 				continue;
824 		}
825 		fd = qbman_result_DQ_fd(dq_storage);
826 
827 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
828 		if (dpaa2_svr_family != SVR_LX2160A) {
829 			const struct qbman_fd *next_fd =
830 				qbman_result_DQ_fd(dq_storage + 1);
831 			/* Prefetch Annotation address for the parse results */
832 			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
833 				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
834 		}
835 #endif
836 
837 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
838 			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
839 		else
840 			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
841 #if defined(RTE_LIBRTE_IEEE1588)
842 		if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
843 			priv->rx_timestamp =
844 				*dpaa2_timestamp_dynfield(bufs[num_rx]);
845 		}
846 #endif
847 
848 		if (eth_data->dev_conf.rxmode.offloads &
849 				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
850 			rte_vlan_strip(bufs[num_rx]);
851 
852 		dq_storage++;
853 		num_rx++;
854 	} while (pending);
855 
856 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
857 		while (!qbman_check_command_complete(
858 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
859 			;
860 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
861 	}
862 	/* issue a volatile dequeue command for next pull */
863 	while (1) {
864 		if (qbman_swp_pull(swp, &pulldesc)) {
865 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
866 					  " QBMAN is busy (2)");
867 			continue;
868 		}
869 		break;
870 	}
871 	q_storage->active_dqs = dq_storage1;
872 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
873 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
874 
875 	dpaa2_q->rx_pkts += num_rx;
876 
877 	return num_rx;
878 }
879 
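/* Eventdev Rx callbacks: translate a dequeued frame descriptor into an
 * rte_event carrying the mbuf. The parallel variant consumes the DQRR entry
 * immediately, the atomic variant holds it until the event is released, and
 * the ordered variant stashes the ODP id and sequence number in the mbuf
 * for later order restoration on Tx.
 */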
880 void __rte_hot
881 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
882 				 const struct qbman_fd *fd,
883 				 const struct qbman_result *dq,
884 				 struct dpaa2_queue *rxq,
885 				 struct rte_event *ev)
886 {
887 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
888 		DPAA2_FD_PTA_SIZE + 16));
889 
890 	ev->flow_id = rxq->ev.flow_id;
891 	ev->sub_event_type = rxq->ev.sub_event_type;
892 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
893 	ev->op = RTE_EVENT_OP_NEW;
894 	ev->sched_type = rxq->ev.sched_type;
895 	ev->queue_id = rxq->ev.queue_id;
896 	ev->priority = rxq->ev.priority;
897 
898 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
899 
900 	qbman_swp_dqrr_consume(swp, dq);
901 }
902 
903 void __rte_hot
904 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
905 			       const struct qbman_fd *fd,
906 			       const struct qbman_result *dq,
907 			       struct dpaa2_queue *rxq,
908 			       struct rte_event *ev)
909 {
910 	uint8_t dqrr_index;
911 
912 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
913 		DPAA2_FD_PTA_SIZE + 16));
914 
915 	ev->flow_id = rxq->ev.flow_id;
916 	ev->sub_event_type = rxq->ev.sub_event_type;
917 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
918 	ev->op = RTE_EVENT_OP_NEW;
919 	ev->sched_type = rxq->ev.sched_type;
920 	ev->queue_id = rxq->ev.queue_id;
921 	ev->priority = rxq->ev.priority;
922 
923 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
924 
925 	dqrr_index = qbman_get_dqrr_idx(dq);
926 	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
927 	DPAA2_PER_LCORE_DQRR_SIZE++;
928 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
929 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
930 }
931 
932 void __rte_hot
933 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
934 				const struct qbman_fd *fd,
935 				const struct qbman_result *dq,
936 				struct dpaa2_queue *rxq,
937 				struct rte_event *ev)
938 {
939 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
940 		DPAA2_FD_PTA_SIZE + 16));
941 
942 	ev->flow_id = rxq->ev.flow_id;
943 	ev->sub_event_type = rxq->ev.sub_event_type;
944 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
945 	ev->op = RTE_EVENT_OP_NEW;
946 	ev->sched_type = rxq->ev.sched_type;
947 	ev->queue_id = rxq->ev.queue_id;
948 	ev->priority = rxq->ev.priority;
949 
950 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
951 
952 	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
953 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
954 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
955 
956 	qbman_swp_dqrr_consume(swp, dq);
957 }
958 
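/* Non-prefetch Rx: issue volatile dequeue commands for up to nb_pkts frames
 * in batches of at most dpaa2_dqrr_size, converting each dequeued FD into an
 * mbuf.
 */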
959 uint16_t
960 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
961 {
962 	/* Function to receive frames for a given device and VQ */
963 	struct dpaa2_queue *dpaa2_q = queue;
964 	struct qbman_result *dq_storage;
965 	uint32_t fqid = dpaa2_q->fqid;
966 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
967 	uint8_t pending, status;
968 	struct qbman_swp *swp;
969 	const struct qbman_fd *fd;
970 	struct qbman_pull_desc pulldesc;
971 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
972 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
973 
974 	if (unlikely(dpaa2_enable_err_queue))
975 		dump_err_pkts(priv->rx_err_vq);
976 
977 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
978 		ret = dpaa2_affine_qbman_swp();
979 		if (ret) {
980 			DPAA2_PMD_ERR(
981 				"Failed to allocate IO portal, tid: %d",
982 				rte_gettid());
983 			return 0;
984 		}
985 	}
986 	swp = DPAA2_PER_LCORE_PORTAL;
987 
988 	do {
989 		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
990 		qbman_pull_desc_clear(&pulldesc);
991 		qbman_pull_desc_set_fq(&pulldesc, fqid);
992 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
993 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
994 
995 		if (next_pull > dpaa2_dqrr_size) {
996 			qbman_pull_desc_set_numframes(&pulldesc,
997 				dpaa2_dqrr_size);
998 			next_pull -= dpaa2_dqrr_size;
999 		} else {
1000 			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
1001 			next_pull = 0;
1002 		}
1003 
1004 		while (1) {
1005 			if (qbman_swp_pull(swp, &pulldesc)) {
1006 				DPAA2_PMD_DP_DEBUG(
1007 					"VDQ command is not issued. QBMAN is busy");
1008 				/* Portal was busy, try again */
1009 				continue;
1010 			}
1011 			break;
1012 		}
1013 
1014 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1015 		/* Check if the previously issued command has completed. */
1016 		while (!qbman_check_command_complete(dq_storage))
1017 			;
1018 
1019 		num_pulled = 0;
1020 		pending = 1;
1021 		do {
1022 			/* Loop until the dq_storage is updated with
1023 			 * new token by QBMAN
1024 			 */
1025 			while (!qbman_check_new_result(dq_storage))
1026 				;
1027 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1028 			/* Check whether the last pull command has expired and
1029 			 * set the condition for loop termination.
1030 			 */
1031 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1032 				pending = 0;
1033 				/* Check for valid frame. */
1034 				status = qbman_result_DQ_flags(dq_storage);
1035 				if (unlikely((status &
1036 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
1037 					continue;
1038 			}
1039 			fd = qbman_result_DQ_fd(dq_storage);
1040 
1041 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
1042 			if (dpaa2_svr_family != SVR_LX2160A) {
1043 				const struct qbman_fd *next_fd =
1044 					qbman_result_DQ_fd(dq_storage + 1);
1045 
1046 				/* Prefetch Annotation address for the parse
1047 				 * results.
1048 				 */
1049 				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
1050 					DPAA2_GET_FD_ADDR(next_fd) +
1051 					DPAA2_FD_PTA_SIZE + 16)));
1052 			}
1053 #endif
1054 
1055 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
1056 				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
1057 							eth_data->port_id);
1058 			else
1059 				bufs[num_rx] = eth_fd_to_mbuf(fd,
1060 							eth_data->port_id);
1061 
1062 #if defined(RTE_LIBRTE_IEEE1588)
1063 			if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
1064 				priv->rx_timestamp =
1065 					*dpaa2_timestamp_dynfield(bufs[num_rx]);
1066 			}
1067 #endif
1068 
1069 			if (eth_data->dev_conf.rxmode.offloads &
1070 					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1071 				rte_vlan_strip(bufs[num_rx]);
1072 			}
1073 
1074 			dq_storage++;
1075 			num_rx++;
1076 			num_pulled++;
1077 		} while (pending);
1078 	/* Last VDQ provided all packets and more packets are requested */
1079 	} while (next_pull && num_pulled == dpaa2_dqrr_size);
1080 
1081 	dpaa2_q->rx_pkts += num_rx;
1082 
1083 	return num_rx;
1084 }
1085 
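/* Tx confirmation handling: pull confirmation frames for the queue and
 * release their buffers back to the buffer pool through QBMAN; with
 * IEEE1588 enabled, also capture the Tx timestamp from the annotation.
 */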
1086 uint16_t dpaa2_dev_tx_conf(void *queue)
1087 {
1088 	/* Function to receive frames for a given device and VQ */
1089 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1090 	struct qbman_result *dq_storage;
1091 	uint32_t fqid = dpaa2_q->fqid;
1092 	int ret, num_tx_conf = 0, num_pulled;
1093 	uint8_t pending, status;
1094 	struct qbman_swp *swp;
1095 	const struct qbman_fd *fd, *next_fd;
1096 	struct qbman_pull_desc pulldesc;
1097 	struct qbman_release_desc releasedesc;
1098 	uint32_t bpid;
1099 	uint64_t buf;
1100 #if defined(RTE_LIBRTE_IEEE1588)
1101 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1102 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1103 	struct dpaa2_annot_hdr *annotation;
1104 	void *v_addr;
1105 	struct rte_mbuf *mbuf;
1106 #endif
1107 
1108 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1109 		ret = dpaa2_affine_qbman_swp();
1110 		if (ret) {
1111 			DPAA2_PMD_ERR(
1112 				"Failed to allocate IO portal, tid: %d",
1113 				rte_gettid());
1114 			return 0;
1115 		}
1116 	}
1117 	swp = DPAA2_PER_LCORE_PORTAL;
1118 
1119 	do {
1120 		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
1121 		qbman_pull_desc_clear(&pulldesc);
1122 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1123 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1124 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1125 
1126 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
1127 
1128 		while (1) {
1129 			if (qbman_swp_pull(swp, &pulldesc)) {
1130 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1131 						   " QBMAN is busy");
1132 				/* Portal was busy, try again */
1133 				continue;
1134 			}
1135 			break;
1136 		}
1137 
1138 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1139 		/* Check if the previously issued command has completed. */
1140 		while (!qbman_check_command_complete(dq_storage))
1141 			;
1142 
1143 		num_pulled = 0;
1144 		pending = 1;
1145 		do {
1146 			/* Loop until the dq_storage is updated with
1147 			 * new token by QBMAN
1148 			 */
1149 			while (!qbman_check_new_result(dq_storage))
1150 				;
1151 			/* Check whether the last pull command has expired and
1152 			 * set the condition for loop termination.
1153 			 * setting Condition for Loop termination
1154 			 */
1155 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1156 				pending = 0;
1157 				/* Check for valid frame. */
1158 				status = qbman_result_DQ_flags(dq_storage);
1159 				if (unlikely((status &
1160 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
1161 					continue;
1162 			}
1163 			fd = qbman_result_DQ_fd(dq_storage);
1164 
1165 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
1166 			/* Prefetch Annotation address for the parse results */
1167 			rte_prefetch0((void *)(size_t)
1168 				(DPAA2_GET_FD_ADDR(next_fd) +
1169 				 DPAA2_FD_PTA_SIZE + 16));
1170 
1171 			bpid = DPAA2_GET_FD_BPID(fd);
1172 
1173 			/* Create a release descriptor required for releasing
1174 			 * buffers into QBMAN
1175 			 */
1176 			qbman_release_desc_clear(&releasedesc);
1177 			qbman_release_desc_set_bpid(&releasedesc, bpid);
1178 
1179 			buf = DPAA2_GET_FD_ADDR(fd);
1180 			/* feed them to bman */
1181 			do {
1182 				ret = qbman_swp_release(swp, &releasedesc,
1183 							&buf, 1);
1184 			} while (ret == -EBUSY);
1185 
1186 			dq_storage++;
1187 			num_tx_conf++;
1188 			num_pulled++;
1189 #if defined(RTE_LIBRTE_IEEE1588)
1190 			v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1191 			mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
1192 				rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1193 
1194 			if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1195 				annotation = (struct dpaa2_annot_hdr *)((size_t)
1196 					DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1197 					DPAA2_FD_PTA_SIZE);
1198 				priv->tx_timestamp = annotation->word2;
1199 			}
1200 #endif
1201 		} while (pending);
1202 
1203 	/* Last VDQ provided all packets and more packets are requested */
1204 	} while (num_pulled == dpaa2_dqrr_size);
1205 
1206 	dpaa2_q->rx_pkts += num_tx_conf;
1207 
1208 	return num_tx_conf;
1209 }
1210 
1211 /* Configure the egress frame annotation for timestamp update */
1212 static void enable_tx_tstamp(struct qbman_fd *fd)
1213 {
1214 	struct dpaa2_faead *fd_faead;
1215 
1216 	/* Set frame annotation status field as valid */
1217 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1218 
1219 	/* Set frame annotation egress action descriptor as valid */
1220 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1221 
1222 	/* Set Annotation Length as 128B */
1223 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1224 
1225 	/* enable update of confirmation frame annotation */
1226 	fd_faead = (struct dpaa2_faead *)((size_t)
1227 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1228 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1229 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1230 				DPAA2_ANNOT_FAEAD_UPD;
1231 }
1232 
1233 /*
1234  * Callback to handle sending packets through WRIOP based interface
1235  */
1236 uint16_t
1237 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1238 {
1239 	/* Function to transmit the frames to the given device and VQ */
1240 	uint32_t loop, retry_count;
1241 	int32_t ret;
1242 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1243 	struct rte_mbuf *mi;
1244 	uint32_t frames_to_send;
1245 	struct rte_mempool *mp;
1246 	struct qbman_eq_desc eqdesc;
1247 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1248 	struct qbman_swp *swp;
1249 	uint16_t num_tx = 0;
1250 	uint16_t bpid;
1251 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1252 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1253 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1254 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1255 	uint32_t free_count = 0;
1256 
1257 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1258 		ret = dpaa2_affine_qbman_swp();
1259 		if (ret) {
1260 			DPAA2_PMD_ERR(
1261 				"Failed to allocate IO portal, tid: %d",
1262 				rte_gettid());
1263 			return 0;
1264 		}
1265 	}
1266 	swp = DPAA2_PER_LCORE_PORTAL;
1267 
1268 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
1269 			eth_data, dpaa2_q->fqid);
1270 
1271 #ifdef RTE_LIBRTE_IEEE1588
1272 	/* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1273 	 * corresponding to the last transmitted packet in order to read
1274 	 * the timestamp.
1275 	 */
1276 	if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1277 		priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1278 		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1279 		priv->tx_timestamp = 0;
1280 	}
1281 #endif
1282 
1283 	/*Prepare enqueue descriptor*/
1284 	qbman_eq_desc_clear(&eqdesc);
1285 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1286 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1287 
1288 	/*Clear the unused FD fields before sending*/
1289 	while (nb_pkts) {
1290 		/*Check if the queue is congested*/
1291 		retry_count = 0;
1292 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1293 			retry_count++;
1294 			/* Retry for some time before giving up */
1295 			if (retry_count > CONG_RETRY_COUNT)
1296 				goto skip_tx;
1297 		}
1298 
1299 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1300 			dpaa2_eqcr_size : nb_pkts;
1301 
1302 		for (loop = 0; loop < frames_to_send; loop++) {
1303 			if (*dpaa2_seqn(*bufs)) {
1304 				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1305 
1306 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1307 						dqrr_index;
1308 				DPAA2_PER_LCORE_DQRR_SIZE--;
1309 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1310 				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1311 			}
1312 
1313 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1314 				mp = (*bufs)->pool;
1315 				/* Check the basic scenario and set
1316 				 * the FD appropriately here itself.
1317 				 */
1318 				if (likely(mp && mp->ops_index ==
1319 				    priv->bp_list->dpaa2_ops_index &&
1320 				    (*bufs)->nb_segs == 1 &&
1321 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1322 					if (unlikely(((*bufs)->ol_flags
1323 						& RTE_MBUF_F_TX_VLAN) ||
1324 						(eth_data->dev_conf.txmode.offloads
1325 						& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1326 						ret = rte_vlan_insert(bufs);
1327 						if (ret)
1328 							goto send_n_return;
1329 					}
1330 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1331 					&fd_arr[loop], mempool_to_bpid(mp));
1332 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1333 					rte_mempool_check_cookies
1334 						(rte_mempool_from_obj((void *)*bufs),
1335 						(void **)bufs, 1, 0);
1336 #endif
1337 					bufs++;
1338 #ifdef RTE_LIBRTE_IEEE1588
1339 					enable_tx_tstamp(&fd_arr[loop]);
1340 #endif
1341 					continue;
1342 				}
1343 			} else {
1344 				mi = rte_mbuf_from_indirect(*bufs);
1345 				mp = mi->pool;
1346 			}
1347 
1348 			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1349 				if (unlikely((*bufs)->nb_segs > 1)) {
1350 					mp = (*bufs)->pool;
1351 					if (eth_mbuf_to_sg_fd(*bufs,
1352 							      &fd_arr[loop],
1353 							      buf_to_free,
1354 							      &free_count,
1355 							      loop,
1356 							      mempool_to_bpid(mp)))
1357 						goto send_n_return;
1358 				} else {
1359 					eth_mbuf_to_fd(*bufs,
1360 							&fd_arr[loop],
1361 							buf_to_free,
1362 							&free_count,
1363 							loop, 0);
1364 				}
1365 				bufs++;
1366 #ifdef RTE_LIBRTE_IEEE1588
1367 				enable_tx_tstamp(&fd_arr[loop]);
1368 #endif
1369 				continue;
1370 			}
1371 
1372 			/* Not a hw_pkt pool allocated frame */
1373 			if (unlikely(!mp || !priv->bp_list)) {
1374 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1375 				goto send_n_return;
1376 			}
1377 
1378 			if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
1379 				(eth_data->dev_conf.txmode.offloads
1380 				& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1381 				int ret = rte_vlan_insert(bufs);
1382 				if (ret)
1383 					goto send_n_return;
1384 			}
1385 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1386 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1387 				/* alloc should be from the default buffer pool
1388 				 * attached to this interface
1389 				 */
1390 				bpid = priv->bp_list->buf_pool.bpid;
1391 
1392 				if (unlikely((*bufs)->nb_segs > 1)) {
1393 					DPAA2_PMD_ERR("S/G support not added"
1394 						" for non hw offload buffer");
1395 					goto send_n_return;
1396 				}
1397 				if (eth_copy_mbuf_to_fd(*bufs,
1398 							&fd_arr[loop], bpid)) {
1399 					goto send_n_return;
1400 				}
1401 				/* free the original packet */
1402 				rte_pktmbuf_free(*bufs);
1403 			} else {
1404 				bpid = mempool_to_bpid(mp);
1405 				if (unlikely((*bufs)->nb_segs > 1)) {
1406 					if (eth_mbuf_to_sg_fd(*bufs,
1407 							&fd_arr[loop],
1408 							buf_to_free,
1409 							&free_count,
1410 							loop,
1411 							bpid))
1412 						goto send_n_return;
1413 				} else {
1414 					eth_mbuf_to_fd(*bufs,
1415 							&fd_arr[loop],
1416 							buf_to_free,
1417 							&free_count,
1418 							loop, bpid);
1419 				}
1420 			}
1421 #ifdef RTE_LIBRTE_IEEE1588
1422 			enable_tx_tstamp(&fd_arr[loop]);
1423 #endif
1424 			bufs++;
1425 		}
1426 
1427 		loop = 0;
1428 		retry_count = 0;
1429 		while (loop < frames_to_send) {
1430 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1431 					&fd_arr[loop], &flags[loop],
1432 					frames_to_send - loop);
1433 			if (unlikely(ret < 0)) {
1434 				retry_count++;
1435 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1436 					num_tx += loop;
1437 					nb_pkts -= loop;
1438 					goto send_n_return;
1439 				}
1440 			} else {
1441 				loop += ret;
1442 				retry_count = 0;
1443 			}
1444 		}
1445 
1446 		num_tx += loop;
1447 		nb_pkts -= loop;
1448 	}
1449 	dpaa2_q->tx_pkts += num_tx;
1450 
1451 	for (loop = 0; loop < free_count; loop++) {
1452 		if (buf_to_free[loop].pkt_id < num_tx)
1453 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1454 	}
1455 
1456 	return num_tx;
1457 
1458 send_n_return:
1459 	/* send any already prepared fd */
1460 	if (loop) {
1461 		unsigned int i = 0;
1462 
1463 		retry_count = 0;
1464 		while (i < loop) {
1465 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1466 							 &fd_arr[i],
1467 							 &flags[i],
1468 							 loop - i);
1469 			if (unlikely(ret < 0)) {
1470 				retry_count++;
1471 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1472 					break;
1473 			} else {
1474 				i += ret;
1475 				retry_count = 0;
1476 			}
1477 		}
1478 		num_tx += i;
1479 	}
1480 skip_tx:
1481 	dpaa2_q->tx_pkts += num_tx;
1482 
1483 	for (loop = 0; loop < free_count; loop++) {
1484 		if (buf_to_free[loop].pkt_id < num_tx)
1485 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1486 	}
1487 
1488 	return num_tx;
1489 }
1490 
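/* Free the mbuf referenced by the FD stored in the given enqueue-response
 * entry, so the buffer is not leaked.
 */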
1491 void
1492 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
1493 			  __rte_unused struct dpaa2_queue *dpaa2_q)
1494 {
1495 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1496 	struct qbman_fd *fd;
1497 	struct rte_mbuf *m;
1498 
1499 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1500 
1501 	/* Setting port id does not matter as we are going to free the mbuf */
1502 	m = eth_fd_to_mbuf(fd, 0);
1503 	rte_pktmbuf_free(m);
1504 }
1505 
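/* Program the enqueue descriptor from the sequence number stashed in the
 * mbuf: either an order-restoration (ORP) enqueue, with an enqueue response
 * when loose ordering is disabled, or a DQRR-consume (DCA) enqueue for
 * atomic queues.
 */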
1506 static void
1507 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1508 			     struct rte_mbuf *m,
1509 			     struct qbman_eq_desc *eqdesc)
1510 {
1511 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1512 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1513 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1514 	struct eqresp_metadata *eqresp_meta;
1515 	uint16_t orpid, seqnum;
1516 	uint8_t dq_idx;
1517 
1518 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1519 
1520 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1521 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1522 			DPAA2_EQCR_OPRID_SHIFT;
1523 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1524 			DPAA2_EQCR_SEQNUM_SHIFT;
1525 
1526 		if (!priv->en_loose_ordered) {
1527 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1528 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1529 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1530 				dpio_dev->eqresp_pi]), 1);
1531 			qbman_eq_desc_set_token(eqdesc, 1);
1532 
1533 			eqresp_meta = &dpio_dev->eqresp_meta[
1534 				dpio_dev->eqresp_pi];
1535 			eqresp_meta->dpaa2_q = dpaa2_q;
1536 			eqresp_meta->mp = m->pool;
1537 
1538 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1539 				dpio_dev->eqresp_pi++ :
1540 				(dpio_dev->eqresp_pi = 0);
1541 		} else {
1542 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1543 		}
1544 	} else {
1545 		dq_idx = *dpaa2_seqn(m) - 1;
1546 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1547 		DPAA2_PER_LCORE_DQRR_SIZE--;
1548 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1549 	}
1550 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1551 }
1552 
1553 uint16_t
1554 dpaa2_dev_tx_multi_txq_ordered(void **queue,
1555 		struct rte_mbuf **bufs, uint16_t nb_pkts)
1556 {
1557 	/* Function to transmit the frames to their respective queues. */
1558 	uint32_t loop, i, retry_count;
1559 	int32_t ret;
1560 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1561 	uint32_t frames_to_send, num_free_eq_desc = 0;
1562 	struct rte_mempool *mp;
1563 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1564 	struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
1565 	struct qbman_swp *swp;
1566 	uint16_t bpid;
1567 	struct rte_mbuf *mi;
1568 	struct rte_eth_dev_data *eth_data;
1569 	struct dpaa2_dev_priv *priv;
1570 	struct dpaa2_queue *order_sendq;
1571 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1572 	uint32_t free_count = 0;
1573 
1574 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1575 		ret = dpaa2_affine_qbman_swp();
1576 		if (ret) {
1577 			DPAA2_PMD_ERR(
1578 				"Failed to allocate IO portal, tid: %d",
1579 				rte_gettid());
1580 			return 0;
1581 		}
1582 	}
1583 	swp = DPAA2_PER_LCORE_PORTAL;
1584 
1585 	frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1586 		dpaa2_eqcr_size : nb_pkts;
1587 
1588 	for (loop = 0; loop < frames_to_send; loop++) {
1589 		dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
1590 		eth_data = dpaa2_q[loop]->eth_data;
1591 		priv = eth_data->dev_private;
1592 		if (!priv->en_loose_ordered) {
1593 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1594 				if (!num_free_eq_desc) {
1595 					num_free_eq_desc = dpaa2_free_eq_descriptors();
1596 					if (!num_free_eq_desc)
1597 						goto send_frames;
1598 				}
1599 				num_free_eq_desc--;
1600 			}
1601 		}
1602 
1603 		DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
1604 				   eth_data, dpaa2_q[loop]->fqid);
1605 
1606 		/* Check if the queue is congested */
1607 		retry_count = 0;
1608 		while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
1609 			retry_count++;
1610 			/* Retry for some time before giving up */
1611 			if (retry_count > CONG_RETRY_COUNT)
1612 				goto send_frames;
1613 		}
1614 
1615 		/* Prepare enqueue descriptor */
1616 		qbman_eq_desc_clear(&eqdesc[loop]);
1617 
1618 		if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
1619 			order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1620 			dpaa2_set_enqueue_descriptor(order_sendq,
1621 						     (*bufs),
1622 						     &eqdesc[loop]);
1623 		} else {
1624 			qbman_eq_desc_set_no_orp(&eqdesc[loop],
1625 							 DPAA2_EQ_RESP_ERR_FQ);
1626 			qbman_eq_desc_set_fq(&eqdesc[loop],
1627 						     dpaa2_q[loop]->fqid);
1628 		}
1629 
1630 		if (likely(RTE_MBUF_DIRECT(*bufs))) {
1631 			mp = (*bufs)->pool;
1632 			/* Check the basic scenario and set
1633 			 * the FD appropriately here itself.
1634 			 */
1635 			if (likely(mp && mp->ops_index ==
1636 				priv->bp_list->dpaa2_ops_index &&
1637 				(*bufs)->nb_segs == 1 &&
1638 				rte_mbuf_refcnt_read((*bufs)) == 1)) {
1639 				if (unlikely((*bufs)->ol_flags
1640 					& RTE_MBUF_F_TX_VLAN)) {
1641 					ret = rte_vlan_insert(bufs);
1642 					if (ret)
1643 						goto send_frames;
1644 				}
1645 				DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1646 					&fd_arr[loop],
1647 					mempool_to_bpid(mp));
1648 				bufs++;
1649 				continue;
1650 			}
1651 		} else {
1652 			mi = rte_mbuf_from_indirect(*bufs);
1653 			mp = mi->pool;
1654 		}
1655 		/* Not a hw_pkt pool allocated frame */
1656 		if (unlikely(!mp || !priv->bp_list)) {
1657 			DPAA2_PMD_ERR("Err: No buffer pool attached");
1658 			goto send_frames;
1659 		}
1660 
1661 		if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1662 			DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1663 			/* alloc should be from the default buffer pool
1664 			 * attached to this interface
1665 			 */
1666 			bpid = priv->bp_list->buf_pool.bpid;
1667 
1668 			if (unlikely((*bufs)->nb_segs > 1)) {
1669 				DPAA2_PMD_ERR(
1670 					"S/G not supp for non hw offload buffer");
1671 				goto send_frames;
1672 			}
1673 			if (eth_copy_mbuf_to_fd(*bufs,
1674 						&fd_arr[loop], bpid)) {
1675 				goto send_frames;
1676 			}
1677 			/* free the original packet */
1678 			rte_pktmbuf_free(*bufs);
1679 		} else {
1680 			bpid = mempool_to_bpid(mp);
1681 			if (unlikely((*bufs)->nb_segs > 1)) {
1682 				if (eth_mbuf_to_sg_fd(*bufs,
1683 						      &fd_arr[loop],
1684 						      buf_to_free,
1685 						      &free_count,
1686 						      loop,
1687 						      bpid))
1688 					goto send_frames;
1689 			} else {
1690 				eth_mbuf_to_fd(*bufs,
1691 						&fd_arr[loop],
1692 						buf_to_free,
1693 						&free_count,
1694 						loop, bpid);
1695 			}
1696 		}
1697 
1698 		bufs++;
1699 	}
1700 
1701 send_frames:
1702 	frames_to_send = loop;
1703 	loop = 0;
1704 	retry_count = 0;
1705 	while (loop < frames_to_send) {
1706 		ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
1707 				&fd_arr[loop],
1708 				frames_to_send - loop);
1709 		if (likely(ret > 0)) {
1710 			loop += ret;
1711 			retry_count = 0;
1712 		} else {
1713 			retry_count++;
1714 			if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1715 				break;
1716 		}
1717 	}
1718 
1719 	for (i = 0; i < free_count; i++) {
1720 		if (buf_to_free[i].pkt_id < loop)
1721 			rte_pktmbuf_free_seg(buf_to_free[i].seg);
1722 	}
1723 	return loop;
1724 }
1725 
1726 /* Callback to handle sending ordered packets through WRIOP based interface */
1727 uint16_t
1728 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1729 {
1730 	/* Function to transmit the frames to the given device and VQ */
1731 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1732 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1733 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1734 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1735 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1736 	struct rte_mbuf *mi;
1737 	struct rte_mempool *mp;
1738 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1739 	struct qbman_swp *swp;
1740 	uint32_t frames_to_send, num_free_eq_desc;
1741 	uint32_t loop, retry_count;
1742 	int32_t ret;
1743 	uint16_t num_tx = 0;
1744 	uint16_t bpid;
1745 	struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
1746 	uint32_t free_count = 0;
1747 
1748 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1749 		ret = dpaa2_affine_qbman_swp();
1750 		if (ret) {
1751 			DPAA2_PMD_ERR(
1752 				"Failed to allocate IO portal, tid: %d",
1753 				rte_gettid());
1754 			return 0;
1755 		}
1756 	}
1757 	swp = DPAA2_PER_LCORE_PORTAL;
1758 
1759 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d",
1760 			   eth_data, dpaa2_q->fqid);
1761 
1762 	/* This path also handles normal and atomic queues, since any
1763 	 * type of packet can be enqueued when ordered queues are in use.
1764 	 */
1765 	while (nb_pkts) {
1766 		/* Check if the queue is congested */
1767 		retry_count = 0;
1768 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1769 			retry_count++;
1770 			/* Retry for some time before giving up */
1771 			if (retry_count > CONG_RETRY_COUNT)
1772 				goto skip_tx;
1773 		}
1774 
1775 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1776 			dpaa2_eqcr_size : nb_pkts;
1777 
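		/* With strict ordering, frames enqueued with order
		 * restoration are limited by the number of free enqueue
		 * descriptors reported by dpaa2_free_eq_descriptors().
		 */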
1778 		if (!priv->en_loose_ordered) {
1779 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1780 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1781 				if (num_free_eq_desc < frames_to_send)
1782 					frames_to_send = num_free_eq_desc;
1783 			}
1784 		}
1785 
1786 		for (loop = 0; loop < frames_to_send; loop++) {
1787 			/* Prepare enqueue descriptor */
1788 			qbman_eq_desc_clear(&eqdesc[loop]);
1789 
1790 			if (*dpaa2_seqn(*bufs)) {
1791 				/* Use only Tx queue 0 for atomic/ordered
1792 				 * packets, as they could otherwise be reordered
1793 				 * when transmitted out of the interface.
1794 				 */
1795 				dpaa2_set_enqueue_descriptor(order_sendq,
1796 							     (*bufs),
1797 							     &eqdesc[loop]);
1798 			} else {
1799 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1800 							 DPAA2_EQ_RESP_ERR_FQ);
1801 				qbman_eq_desc_set_fq(&eqdesc[loop],
1802 						     dpaa2_q->fqid);
1803 			}
1804 
1805 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1806 				mp = (*bufs)->pool;
1807 				/* Fast path: a single-segment mbuf with refcnt 1 from
1808 				 * the attached DPAA2 pool maps directly to a contiguous FD.
1809 				 */
1810 				if (likely(mp && mp->ops_index ==
1811 				    priv->bp_list->dpaa2_ops_index &&
1812 				    (*bufs)->nb_segs == 1 &&
1813 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1814 					if (unlikely((*bufs)->ol_flags
1815 						& RTE_MBUF_F_TX_VLAN)) {
1816 						ret = rte_vlan_insert(bufs);
1817 						if (ret)
1818 							goto send_n_return;
1819 					}
1820 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1821 						&fd_arr[loop],
1822 						mempool_to_bpid(mp));
1823 					bufs++;
1824 					continue;
1825 				}
1826 			} else {
1827 				mi = rte_mbuf_from_indirect(*bufs);
1828 				mp = mi->pool;
1829 			}
1830 			/* Not a frame allocated from a hw_pkt pool */
1831 			if (unlikely(!mp || !priv->bp_list)) {
1832 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1833 				goto send_n_return;
1834 			}
1835 
1836 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1837 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1838 				/* The copy should be allocated from the default
1839 				 * buffer pool attached to this interface.
1840 				 */
1841 				bpid = priv->bp_list->buf_pool.bpid;
1842 
1843 				if (unlikely((*bufs)->nb_segs > 1)) {
1844 					DPAA2_PMD_ERR(
1845 						"S/G not supported for non hw offload buffer");
1846 					goto send_n_return;
1847 				}
1848 				if (eth_copy_mbuf_to_fd(*bufs,
1849 							&fd_arr[loop], bpid)) {
1850 					goto send_n_return;
1851 				}
1852 				/* free the original packet */
1853 				rte_pktmbuf_free(*bufs);
1854 			} else {
1855 				bpid = mempool_to_bpid(mp);
1856 				if (unlikely((*bufs)->nb_segs > 1)) {
1857 					if (eth_mbuf_to_sg_fd(*bufs,
1858 							      &fd_arr[loop],
1859 							      buf_to_free,
1860 							      &free_count,
1861 							      loop,
1862 							      bpid))
1863 						goto send_n_return;
1864 				} else {
1865 					eth_mbuf_to_fd(*bufs,
1866 							&fd_arr[loop],
1867 							buf_to_free,
1868 							&free_count,
1869 							loop, bpid);
1870 				}
1871 			}
1872 			bufs++;
1873 		}
1874 
1875 		loop = 0;
1876 		retry_count = 0;
1877 		while (loop < frames_to_send) {
1878 			ret = qbman_swp_enqueue_multiple_desc(swp,
1879 					&eqdesc[loop], &fd_arr[loop],
1880 					frames_to_send - loop);
1881 			if (unlikely(ret < 0)) {
1882 				retry_count++;
1883 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1884 					num_tx += loop;
1885 					nb_pkts -= loop;
1886 					goto send_n_return;
1887 				}
1888 			} else {
1889 				loop += ret;
1890 				retry_count = 0;
1891 			}
1892 		}
1893 
1894 		num_tx += loop;
1895 		nb_pkts -= loop;
1896 	}
1897 	dpaa2_q->tx_pkts += num_tx;
1898 	for (loop = 0; loop < free_count; loop++) {
1899 		if (buf_to_free[loop].pkt_id < num_tx)
1900 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1901 	}
1902 
1903 	return num_tx;
1904 
1905 send_n_return:
1906 	/* Send any FDs that were already prepared */
1907 	if (loop) {
1908 		unsigned int i = 0;
1909 
1910 		retry_count = 0;
1911 		while (i < loop) {
1912 			ret = qbman_swp_enqueue_multiple_desc(swp,
1913 				       &eqdesc[i], &fd_arr[i], loop - i);
1914 			if (unlikely(ret < 0)) {
1915 				retry_count++;
1916 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1917 					break;
1918 			} else {
1919 				i += ret;
1920 				retry_count = 0;
1921 			}
1922 		}
1923 		num_tx += i;
1924 	}
1925 skip_tx:
1926 	dpaa2_q->tx_pkts += num_tx;
1927 	for (loop = 0; loop < free_count; loop++) {
1928 		if (buf_to_free[loop].pkt_id < num_tx)
1929 			rte_pktmbuf_free_seg(buf_to_free[loop].seg);
1930 	}
1931 
1932 	return num_tx;
1933 }
1934 
1935 #if defined(RTE_TOOLCHAIN_GCC)
1936 #pragma GCC diagnostic push
1937 #pragma GCC diagnostic ignored "-Wcast-qual"
1938 #elif defined(RTE_TOOLCHAIN_CLANG)
1939 #pragma clang diagnostic push
1940 #pragma clang diagnostic ignored "-Wcast-qual"
1941 #endif
1942 
1943 /* This function loops back all the received packets. */
1944 uint16_t
1945 dpaa2_dev_loopback_rx(void *queue,
1946 		      struct rte_mbuf **bufs __rte_unused,
1947 		      uint16_t nb_pkts)
1948 {
1949 	/* Function to receive frames for a given device and VQ */
1950 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1951 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1952 	uint32_t fqid = dpaa2_q->fqid;
1953 	int ret, num_rx = 0, num_tx = 0, pull_size;
1954 	uint8_t pending, status;
1955 	struct qbman_swp *swp;
1956 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1957 	struct qbman_pull_desc pulldesc;
1958 	struct qbman_eq_desc eqdesc;
1959 	struct queue_storage_info_t *q_storage;
1960 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1961 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1962 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1963 	/* TODO: currently only the 1st Tx queue is used for loopback */
1964 
1965 	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
1966 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1967 		ret = dpaa2_affine_qbman_ethrx_swp();
1968 		if (ret) {
1969 			DPAA2_PMD_ERR("Failure in affining portal");
1970 			return 0;
1971 		}
1972 	}
1973 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1974 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
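	/* If no volatile dequeue is outstanding for this queue, issue the
	 * initial pull command before processing results.
	 */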
1975 	if (unlikely(!q_storage->active_dqs)) {
1976 		q_storage->toggle = 0;
1977 		dq_storage = q_storage->dq_storage[q_storage->toggle];
1978 		q_storage->last_num_pkts = pull_size;
1979 		qbman_pull_desc_clear(&pulldesc);
1980 		qbman_pull_desc_set_numframes(&pulldesc,
1981 					      q_storage->last_num_pkts);
1982 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1983 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1984 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
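		/* Wait for any volatile dequeue previously issued on this
		 * DPIO to complete before issuing a new pull on it.
		 */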
1985 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1986 			while (!qbman_check_command_complete(
1987 			       get_swp_active_dqs(
1988 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1989 				;
1990 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1991 		}
1992 		while (1) {
1993 			if (qbman_swp_pull(swp, &pulldesc)) {
1994 				DPAA2_PMD_DP_DEBUG(
1995 					"VDQ command not issued. QBMAN busy");
1996 				/* Portal was busy, try again */
1997 				continue;
1998 			}
1999 			break;
2000 		}
2001 		q_storage->active_dqs = dq_storage;
2002 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2003 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
2004 				   dq_storage);
2005 	}
2006 
2007 	dq_storage = q_storage->active_dqs;
2008 	rte_prefetch0((void *)(size_t)(dq_storage));
2009 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
2010 
2011 	/* Prepare next pull descriptor. This will give space for the
2012 	 * prefetching done on DQRR entries
2013 	 */
2014 	q_storage->toggle ^= 1;
2015 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
2016 	qbman_pull_desc_clear(&pulldesc);
2017 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
2018 	qbman_pull_desc_set_fq(&pulldesc, fqid);
2019 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
2020 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
2021 
2022 	/* Prepare enqueue descriptor */
2023 	qbman_eq_desc_clear(&eqdesc);
2024 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
2025 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
2026 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
2027 
2028 	/* Check if the previously issued command has completed.
2029 	 * The SWP also appears to be shared between the Ethernet
2030 	 * driver and the SEC driver.
2031 	 */
2032 	while (!qbman_check_command_complete(dq_storage))
2033 		;
2034 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
2035 		clear_swp_active_dqs(q_storage->active_dpio_id);
2036 
2037 	pending = 1;
2038 
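	/* Collect the FDs from the dequeue storage; they are re-enqueued
	 * unchanged to the Tx FQ below to loop the traffic back.
	 */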
2039 	do {
2040 		/* Loop until dq_storage is updated with a new
2041 		 * token by QBMAN.
2042 		 */
2043 		while (!qbman_check_new_result(dq_storage))
2044 			;
2045 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
2046 		/* Check whether the last pull command has expired and,
2047 		 * if so, set the condition for loop termination.
2048 		 */
2049 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
2050 			pending = 0;
2051 			/* Check for valid frame. */
2052 			status = qbman_result_DQ_flags(dq_storage);
2053 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
2054 				continue;
2055 		}
2056 		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
2057 
2058 		dq_storage++;
2059 		num_rx++;
2060 	} while (pending);
2061 
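	/* Re-enqueue all received FDs to Tx queue 0; the enqueue call
	 * returns how many were accepted, so loop until all are sent.
	 */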
2062 	while (num_tx < num_rx) {
2063 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
2064 				&fd[num_tx], 0, num_rx - num_tx);
2065 	}
2066 
2067 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
2068 		while (!qbman_check_command_complete(
2069 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
2070 			;
2071 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
2072 	}
2073 	/* issue a volatile dequeue command for next pull */
2074 	while (1) {
2075 		if (qbman_swp_pull(swp, &pulldesc)) {
2076 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
2077 					  "QBMAN is busy (2)");
2078 			continue;
2079 		}
2080 		break;
2081 	}
2082 	q_storage->active_dqs = dq_storage1;
2083 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2084 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
2085 
2086 	dpaa2_q->rx_pkts += num_rx;
2087 	dpaa2_q->tx_pkts += num_tx;
2088 
2089 	return 0;
2090 }
2091 #if defined(RTE_TOOLCHAIN_GCC)
2092 #pragma GCC diagnostic pop
2093 #elif defined(RTE_TOOLCHAIN_CLANG)
2094 #pragma clang diagnostic pop
2095 #endif
2096