xref: /dpdk/drivers/net/dpaa2/dpaa2_rxtx.c (revision 10b71caecbe1cddcbb65c050ca775fba575e88db)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2020 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_dev.h>
17 
18 #include <rte_fslmc.h>
19 #include <fslmc_vfio.h>
20 #include <dpaa2_hw_pvt.h>
21 #include <dpaa2_hw_dpio.h>
22 #include <dpaa2_hw_mempool.h>
23 
24 #include "dpaa2_pmd_logs.h"
25 #include "dpaa2_ethdev.h"
26 #include "base/dpaa2_hw_dpni_annot.h"
27 
28 static inline uint32_t __rte_hot
29 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
30 			struct dpaa2_annot_hdr *annotation);
31 
32 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
33 
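/*
 * Populate a contiguous (single-buffer) frame descriptor from an mbuf:
 * program the buffer IOVA, data length, buffer pool id and data offset,
 * and clear the frame context, control and flow-context fields.
 */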
34 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
35 	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
36 	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
37 	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
38 	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
39 	DPAA2_SET_FD_FRC(_fd, 0);		\
40 	DPAA2_RESET_FD_CTRL(_fd);		\
41 	DPAA2_RESET_FD_FLC(_fd);		\
42 } while (0)
43 
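/*
 * Fast Rx parse path used on LX2160A: the parse summary is delivered in
 * the FRC field of the FD, so most packet types can be set without reading
 * the annotation; unrecognized frames fall back to the slow parser.
 */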
44 static inline void __rte_hot
45 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
46 		       void *hw_annot_addr)
47 {
48 	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
49 	struct dpaa2_annot_hdr *annotation =
50 			(struct dpaa2_annot_hdr *)hw_annot_addr;
51 
52 	m->packet_type = RTE_PTYPE_UNKNOWN;
53 	switch (frc) {
54 	case DPAA2_PKT_TYPE_ETHER:
55 		m->packet_type = RTE_PTYPE_L2_ETHER;
56 		break;
57 	case DPAA2_PKT_TYPE_IPV4:
58 		m->packet_type = RTE_PTYPE_L2_ETHER |
59 			RTE_PTYPE_L3_IPV4;
60 		break;
61 	case DPAA2_PKT_TYPE_IPV6:
62 		m->packet_type = RTE_PTYPE_L2_ETHER |
63 			RTE_PTYPE_L3_IPV6;
64 		break;
65 	case DPAA2_PKT_TYPE_IPV4_EXT:
66 		m->packet_type = RTE_PTYPE_L2_ETHER |
67 			RTE_PTYPE_L3_IPV4_EXT;
68 		break;
69 	case DPAA2_PKT_TYPE_IPV6_EXT:
70 		m->packet_type = RTE_PTYPE_L2_ETHER |
71 			RTE_PTYPE_L3_IPV6_EXT;
72 		break;
73 	case DPAA2_PKT_TYPE_IPV4_TCP:
74 		m->packet_type = RTE_PTYPE_L2_ETHER |
75 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
76 		break;
77 	case DPAA2_PKT_TYPE_IPV6_TCP:
78 		m->packet_type = RTE_PTYPE_L2_ETHER |
79 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
80 		break;
81 	case DPAA2_PKT_TYPE_IPV4_UDP:
82 		m->packet_type = RTE_PTYPE_L2_ETHER |
83 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
84 		break;
85 	case DPAA2_PKT_TYPE_IPV6_UDP:
86 		m->packet_type = RTE_PTYPE_L2_ETHER |
87 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
88 		break;
89 	case DPAA2_PKT_TYPE_IPV4_SCTP:
90 		m->packet_type = RTE_PTYPE_L2_ETHER |
91 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
92 		break;
93 	case DPAA2_PKT_TYPE_IPV6_SCTP:
94 		m->packet_type = RTE_PTYPE_L2_ETHER |
95 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
96 		break;
97 	case DPAA2_PKT_TYPE_IPV4_ICMP:
98 		m->packet_type = RTE_PTYPE_L2_ETHER |
99 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
100 		break;
101 	case DPAA2_PKT_TYPE_IPV6_ICMP:
102 		m->packet_type = RTE_PTYPE_L2_ETHER |
103 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
104 		break;
105 	default:
106 		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
107 	}
108 	m->hash.rss = fd->simple.flc_hi;
109 	m->ol_flags |= PKT_RX_RSS_HASH;
110 
111 	if (dpaa2_enable_ts[m->port]) {
112 		m->timestamp = annotation->word2;
113 		m->ol_flags |= PKT_RX_TIMESTAMP;
114 		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
115 	}
116 
117 	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
118 		"ol_flags =0x%" PRIx64 "",
119 		frc, m->packet_type, m->ol_flags);
120 }
121 
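/*
 * Slow Rx parse path: walk the hardware annotation words to derive the
 * packet type, VLAN TCI, checksum status and fragmentation state.
 */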
122 static inline uint32_t __rte_hot
123 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
124 			struct dpaa2_annot_hdr *annotation)
125 {
126 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
127 	uint16_t *vlan_tci;
128 
129 	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
130 			"(4)=0x%" PRIx64 "\t",
131 			annotation->word3, annotation->word4);
132 
133 #if defined(RTE_LIBRTE_IEEE1588)
134 	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
135 		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
136 #endif
137 
138 	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
139 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
140 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
141 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
142 		mbuf->ol_flags |= PKT_RX_VLAN;
143 		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
144 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
145 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
146 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
147 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
148 		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
149 		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
150 	}
151 
152 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
153 		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
154 		goto parse_done;
155 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
156 		pkt_type |= RTE_PTYPE_L2_ETHER;
157 	} else {
158 		goto parse_done;
159 	}
160 
161 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
162 			     L3_IPV4_N_PRESENT)) {
163 		pkt_type |= RTE_PTYPE_L3_IPV4;
164 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
165 			L3_IP_N_OPT_PRESENT))
166 			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
167 
168 	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
169 		  L3_IPV6_N_PRESENT)) {
170 		pkt_type |= RTE_PTYPE_L3_IPV6;
171 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
172 		    L3_IP_N_OPT_PRESENT))
173 			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
174 	} else {
175 		goto parse_done;
176 	}
177 
178 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
179 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
180 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
181 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
182 
183 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
184 	    L3_IP_1_MORE_FRAGMENT |
185 	    L3_IP_N_FIRST_FRAGMENT |
186 	    L3_IP_N_MORE_FRAGMENT)) {
187 		pkt_type |= RTE_PTYPE_L4_FRAG;
188 		goto parse_done;
189 	} else {
190 		pkt_type |= RTE_PTYPE_L4_NONFRAG;
191 	}
192 
193 	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
194 		pkt_type |= RTE_PTYPE_L4_UDP;
195 
196 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
197 		pkt_type |= RTE_PTYPE_L4_TCP;
198 
199 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
200 		pkt_type |= RTE_PTYPE_L4_SCTP;
201 
202 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
203 		pkt_type |= RTE_PTYPE_L4_ICMP;
204 
205 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
206 		pkt_type |= RTE_PTYPE_UNKNOWN;
207 
208 parse_done:
209 	return pkt_type;
210 }
211 
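/*
 * Default Rx parse path (non-LX2 platforms): checksum flags and the Rx
 * timestamp are taken from the annotation; common L3/L4 combinations are
 * matched directly on annotation word4, everything else goes through the
 * slow parser.
 */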
212 static inline uint32_t __rte_hot
213 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
214 {
215 	struct dpaa2_annot_hdr *annotation =
216 			(struct dpaa2_annot_hdr *)hw_annot_addr;
217 
218 	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
219 			   annotation->word4);
220 
221 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
222 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
223 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
224 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
225 
226 	mbuf->ol_flags |= PKT_RX_TIMESTAMP;
227 	mbuf->timestamp = annotation->word2;
228 	DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp);
229 
230 	/* Check detailed parsing requirement */
231 	if (annotation->word3 & 0x7FFFFC3FFFF)
232 		return dpaa2_dev_rx_parse_slow(mbuf, annotation);
233 
234 	/* Return some common types from parse processing */
235 	switch (annotation->word4) {
236 	case DPAA2_L3_IPv4:
237 		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
238 	case DPAA2_L3_IPv6:
239 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
240 	case DPAA2_L3_IPv4_TCP:
241 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
242 				RTE_PTYPE_L4_TCP;
243 	case DPAA2_L3_IPv4_UDP:
244 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
245 				RTE_PTYPE_L4_UDP;
246 	case DPAA2_L3_IPv6_TCP:
247 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
248 				RTE_PTYPE_L4_TCP;
249 	case DPAA2_L3_IPv6_UDP:
250 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
251 				RTE_PTYPE_L4_UDP;
252 	default:
253 		break;
254 	}
255 
256 	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
257 }
258 
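/*
 * Convert a scatter/gather FD received from hardware into a chained mbuf.
 * Each S/G entry is mapped back to the in-line mbuf that precedes its data
 * buffer; the buffer that carries the S/G table itself is freed at the end.
 */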
259 static inline struct rte_mbuf *__rte_hot
260 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
261 		  int port_id)
262 {
263 	struct qbman_sge *sgt, *sge;
264 	size_t sg_addr, fd_addr;
265 	int i = 0;
266 	void *hw_annot_addr;
267 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
268 
269 	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
270 	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
271 
272 	/* Get Scatter gather table address */
273 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
274 
275 	sge = &sgt[i++];
276 	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
277 
278 	/* First Scatter gather entry */
279 	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
280 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
281 	/* Prepare all the metadata for first segment */
282 	first_seg->buf_addr = (uint8_t *)sg_addr;
283 	first_seg->ol_flags = 0;
284 	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
285 	first_seg->data_len = sge->length  & 0x1FFFF;
286 	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
287 	first_seg->nb_segs = 1;
288 	first_seg->next = NULL;
289 	first_seg->port = port_id;
290 	if (dpaa2_svr_family == SVR_LX2160A)
291 		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
292 	else
293 		first_seg->packet_type =
294 			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
295 
296 	rte_mbuf_refcnt_set(first_seg, 1);
297 	cur_seg = first_seg;
298 	while (!DPAA2_SG_IS_FINAL(sge)) {
299 		sge = &sgt[i++];
300 		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
301 				DPAA2_GET_FLE_ADDR(sge));
302 		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
303 			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
304 		next_seg->buf_addr  = (uint8_t *)sg_addr;
305 		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
306 		next_seg->data_len  = sge->length  & 0x1FFFF;
307 		first_seg->nb_segs += 1;
308 		rte_mbuf_refcnt_set(next_seg, 1);
309 		cur_seg->next = next_seg;
310 		next_seg->next = NULL;
311 		cur_seg = next_seg;
312 	}
313 	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
314 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
315 	rte_mbuf_refcnt_set(temp, 1);
316 	rte_pktmbuf_free_seg(temp);
317 
318 	return (void *)first_seg;
319 }
320 
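/*
 * Convert a contiguous FD received from hardware into a single-segment
 * mbuf and run the Rx parser (FRC based on LX2, annotation based on
 * other DPAA2 platforms).
 */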
321 static inline struct rte_mbuf *__rte_hot
322 eth_fd_to_mbuf(const struct qbman_fd *fd,
323 	       int port_id)
324 {
325 	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
326 	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
327 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
328 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
329 
330 	/* Need to repopulate some of the fields,
331 	 * as they may have changed in the last transmission.
332 	 */
333 	mbuf->nb_segs = 1;
334 	mbuf->ol_flags = 0;
335 	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
336 	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
337 	mbuf->pkt_len = mbuf->data_len;
338 	mbuf->port = port_id;
339 	mbuf->next = NULL;
340 	rte_mbuf_refcnt_set(mbuf, 1);
341 
342 	/* Parse the packet */
343 	/* Parse results for LX2 are in the FRC field of the FD.
344 	 * For other DPAA2 platforms, parse results are placed after
345 	 * the private SW annotation area.
346 	 */
347 
348 	if (dpaa2_svr_family == SVR_LX2160A)
349 		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
350 	else
351 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
352 
353 	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
354 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
355 		mbuf, mbuf->buf_addr, mbuf->data_off,
356 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
357 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
358 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
359 
360 	return mbuf;
361 }
362 
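/*
 * Build a scatter/gather FD for a multi-segment mbuf. A buffer is taken
 * from the mbuf's pool to hold the S/G table, and one S/G entry is filled
 * per segment. The buffer pool id programmed in each entry (or the invalid
 * bpid marker) controls whether hardware may free that segment after
 * transmission.
 */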
363 static int __rte_noinline __rte_hot
364 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
365 		  struct qbman_fd *fd, uint16_t bpid)
366 {
367 	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
368 	struct qbman_sge *sgt, *sge = NULL;
369 	int i;
370 
371 	temp = rte_pktmbuf_alloc(mbuf->pool);
372 	if (temp == NULL) {
373 		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
374 		return -ENOMEM;
375 	}
376 
377 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
378 	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
379 	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
380 	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
381 	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
382 	DPAA2_RESET_FD_FRC(fd);
383 	DPAA2_RESET_FD_CTRL(fd);
384 	/*Set Scatter gather table and Scatter gather entries*/
385 	sgt = (struct qbman_sge *)(
386 			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
387 			+ DPAA2_GET_FD_OFFSET(fd));
388 
389 	for (i = 0; i < mbuf->nb_segs; i++) {
390 		sge = &sgt[i];
391 		/*Resetting the buffer pool id and offset field*/
392 		sge->fin_bpid_offset = 0;
393 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
394 		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
395 		sge->length = cur_seg->data_len;
396 		if (RTE_MBUF_DIRECT(cur_seg)) {
397 			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
398 				/* If refcnt > 1, invalid bpid is set to ensure
399 				 * buffer is not freed by HW
400 				 */
401 				DPAA2_SET_FLE_IVP(sge);
402 				rte_mbuf_refcnt_update(cur_seg, -1);
403 			} else
404 				DPAA2_SET_FLE_BPID(sge,
405 						mempool_to_bpid(cur_seg->pool));
406 			cur_seg = cur_seg->next;
407 		} else {
408 			/* Get owner MBUF from indirect buffer */
409 			mi = rte_mbuf_from_indirect(cur_seg);
410 			if (rte_mbuf_refcnt_read(mi) > 1) {
411 				/* If refcnt > 1, invalid bpid is set to ensure
412 				 * owner buffer is not freed by HW
413 				 */
414 				DPAA2_SET_FLE_IVP(sge);
415 			} else {
416 				DPAA2_SET_FLE_BPID(sge,
417 						   mempool_to_bpid(mi->pool));
418 				rte_mbuf_refcnt_update(mi, 1);
419 			}
420 			prev_seg = cur_seg;
421 			cur_seg = cur_seg->next;
422 			prev_seg->next = NULL;
423 			rte_pktmbuf_free(prev_seg);
424 		}
425 	}
426 	DPAA2_SG_SET_FINAL(sge, true);
427 	return 0;
428 }
429 
430 static void
431 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
432 	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;
433 
434 static void __rte_noinline __rte_hot
435 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
436 	       struct qbman_fd *fd, uint16_t bpid)
437 {
438 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
439 
440 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
441 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
442 		mbuf, mbuf->buf_addr, mbuf->data_off,
443 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
444 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
445 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
446 	if (RTE_MBUF_DIRECT(mbuf)) {
447 		if (rte_mbuf_refcnt_read(mbuf) > 1) {
448 			DPAA2_SET_FD_IVP(fd);
449 			rte_mbuf_refcnt_update(mbuf, -1);
450 		}
451 	} else {
452 		struct rte_mbuf *mi;
453 
454 		mi = rte_mbuf_from_indirect(mbuf);
455 		if (rte_mbuf_refcnt_read(mi) > 1)
456 			DPAA2_SET_FD_IVP(fd);
457 		else
458 			rte_mbuf_refcnt_update(mi, 1);
459 		rte_pktmbuf_free(mbuf);
460 	}
461 }
462 
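/*
 * Fallback for mbufs that were not allocated from a DPAA2 backed mempool:
 * copy the packet data into a buffer taken from the hardware buffer pool
 * and build a contiguous FD around that copy.
 */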
463 static inline int __rte_hot
464 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
465 		    struct qbman_fd *fd, uint16_t bpid)
466 {
467 	struct rte_mbuf *m;
468 	void *mb = NULL;
469 
470 	if (rte_dpaa2_mbuf_alloc_bulk(
471 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
472 		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
473 		return -1;
474 	}
475 	m = (struct rte_mbuf *)mb;
476 	memcpy((char *)m->buf_addr + mbuf->data_off,
477 	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
478 		mbuf->pkt_len);
479 
480 	/* Copy required fields */
481 	m->data_off = mbuf->data_off;
482 	m->ol_flags = mbuf->ol_flags;
483 	m->packet_type = mbuf->packet_type;
484 	m->tx_offload = mbuf->tx_offload;
485 
486 	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
487 
488 	DPAA2_PMD_DP_DEBUG(
489 		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
490 		" meta: %d, off: %d, len: %d\n",
491 		(void *)mbuf,
492 		mbuf->buf_addr,
493 		DPAA2_GET_FD_ADDR(fd),
494 		DPAA2_GET_FD_BPID(fd),
495 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
496 		DPAA2_GET_FD_OFFSET(fd),
497 		DPAA2_GET_FD_LEN(fd));
498 
499 	return 0;
500 }
501 
502 /* This function assumes that the caller keeps the same value of nb_pkts
503  * across calls on a given queue; if that is not the case, better use the
504  * non-prefetch version of the Rx call.
505  * It will return the number of packets requested in the previous call,
506  * without honoring the current nb_pkts or the available bufs space.
507  */
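/*
 * Illustrative usage sketch (not part of the driver): applications normally
 * reach this handler through rte_eth_rx_burst() and honor the contract above
 * by polling the queue with a fixed burst size. The port_id and queue_id
 * variables below are placeholders.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb, i;
 *
 *	for (;;) {
 *		nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *		for (i = 0; i < nb; i++)
 *			rte_pktmbuf_free(pkts[i]);
 *	}
 */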
508 uint16_t
509 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
510 {
511 	/* Function to receive frames for a given device and VQ */
512 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
513 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
514 	uint32_t fqid = dpaa2_q->fqid;
515 	int ret, num_rx = 0, pull_size;
516 	uint8_t pending, status;
517 	struct qbman_swp *swp;
518 	const struct qbman_fd *fd;
519 	struct qbman_pull_desc pulldesc;
520 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
521 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
522 #if defined(RTE_LIBRTE_IEEE1588)
523 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
524 #endif
525 
526 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
527 		ret = dpaa2_affine_qbman_ethrx_swp();
528 		if (ret) {
529 			DPAA2_PMD_ERR("Failure in affining portal");
530 			return 0;
531 		}
532 	}
533 
534 	if (unlikely(!rte_dpaa2_bpid_info &&
535 		     rte_eal_process_type() == RTE_PROC_SECONDARY))
536 		rte_dpaa2_bpid_info = dpaa2_q->bp_array;
537 
538 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
539 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
540 	if (unlikely(!q_storage->active_dqs)) {
541 		q_storage->toggle = 0;
542 		dq_storage = q_storage->dq_storage[q_storage->toggle];
543 		q_storage->last_num_pkts = pull_size;
544 		qbman_pull_desc_clear(&pulldesc);
545 		qbman_pull_desc_set_numframes(&pulldesc,
546 					      q_storage->last_num_pkts);
547 		qbman_pull_desc_set_fq(&pulldesc, fqid);
548 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
549 			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
550 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
551 			while (!qbman_check_command_complete(
552 			       get_swp_active_dqs(
553 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
554 				;
555 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
556 		}
557 		while (1) {
558 			if (qbman_swp_pull(swp, &pulldesc)) {
559 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
560 						  " QBMAN is busy (1)\n");
561 				/* Portal was busy, try again */
562 				continue;
563 			}
564 			break;
565 		}
566 		q_storage->active_dqs = dq_storage;
567 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
568 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
569 				   dq_storage);
570 	}
571 
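	/* Process the results of the previously issued pull from the active
	 * storage while the next pull is prepared into the alternate storage,
	 * ping-ponging between the two dq_storage buffers.
	 */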
572 	dq_storage = q_storage->active_dqs;
573 	rte_prefetch0((void *)(size_t)(dq_storage));
574 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
575 
576 	/* Prepare the next pull descriptor. This gives room for the
577 	 * prefetching done on the DQRR entries.
578 	 */
579 	q_storage->toggle ^= 1;
580 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
581 	qbman_pull_desc_clear(&pulldesc);
582 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
583 	qbman_pull_desc_set_fq(&pulldesc, fqid);
584 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
585 		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
586 
587 	/* Check if the previously issued command is completed.
588 	 * Also, it seems the SWP is shared between the Ethernet driver
589 	 * and the SEC driver.
590 	 */
591 	while (!qbman_check_command_complete(dq_storage))
592 		;
593 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
594 		clear_swp_active_dqs(q_storage->active_dpio_id);
595 
596 	pending = 1;
597 
598 	do {
599 		/* Loop until the dq_storage is updated with
600 		 * a new token by QBMAN.
601 		 */
602 		while (!qbman_check_new_result(dq_storage))
603 			;
604 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
605 		/* Check whether the last pull command has expired and
606 		 * set the condition for loop termination.
607 		 */
608 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
609 			pending = 0;
610 			/* Check for valid frame. */
611 			status = qbman_result_DQ_flags(dq_storage);
612 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
613 				continue;
614 		}
615 		fd = qbman_result_DQ_fd(dq_storage);
616 
617 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
618 		if (dpaa2_svr_family != SVR_LX2160A) {
619 			const struct qbman_fd *next_fd =
620 				qbman_result_DQ_fd(dq_storage + 1);
621 			/* Prefetch Annotation address for the parse results */
622 			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
623 				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
624 		}
625 #endif
626 
627 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
628 			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
629 		else
630 			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
631 #if defined(RTE_LIBRTE_IEEE1588)
632 		priv->rx_timestamp = bufs[num_rx]->timestamp;
633 #endif
634 
635 		if (eth_data->dev_conf.rxmode.offloads &
636 				DEV_RX_OFFLOAD_VLAN_STRIP)
637 			rte_vlan_strip(bufs[num_rx]);
638 
639 		dq_storage++;
640 		num_rx++;
641 	} while (pending);
642 
643 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
644 		while (!qbman_check_command_complete(
645 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
646 			;
647 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
648 	}
649 	/* issue a volatile dequeue command for next pull */
650 	while (1) {
651 		if (qbman_swp_pull(swp, &pulldesc)) {
652 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
653 					  "QBMAN is busy (2)\n");
654 			continue;
655 		}
656 		break;
657 	}
658 	q_storage->active_dqs = dq_storage1;
659 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
660 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
661 
662 	dpaa2_q->rx_pkts += num_rx;
663 
664 	return num_rx;
665 }
666 
667 void __rte_hot
668 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
669 				 const struct qbman_fd *fd,
670 				 const struct qbman_result *dq,
671 				 struct dpaa2_queue *rxq,
672 				 struct rte_event *ev)
673 {
674 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
675 		DPAA2_FD_PTA_SIZE + 16));
676 
677 	ev->flow_id = rxq->ev.flow_id;
678 	ev->sub_event_type = rxq->ev.sub_event_type;
679 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
680 	ev->op = RTE_EVENT_OP_NEW;
681 	ev->sched_type = rxq->ev.sched_type;
682 	ev->queue_id = rxq->ev.queue_id;
683 	ev->priority = rxq->ev.priority;
684 
685 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
686 
687 	qbman_swp_dqrr_consume(swp, dq);
688 }
689 
690 void __rte_hot
691 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
692 			       const struct qbman_fd *fd,
693 			       const struct qbman_result *dq,
694 			       struct dpaa2_queue *rxq,
695 			       struct rte_event *ev)
696 {
697 	uint8_t dqrr_index;
698 
699 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
700 		DPAA2_FD_PTA_SIZE + 16));
701 
702 	ev->flow_id = rxq->ev.flow_id;
703 	ev->sub_event_type = rxq->ev.sub_event_type;
704 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
705 	ev->op = RTE_EVENT_OP_NEW;
706 	ev->sched_type = rxq->ev.sched_type;
707 	ev->queue_id = rxq->ev.queue_id;
708 	ev->priority = rxq->ev.priority;
709 
710 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
711 
712 	dqrr_index = qbman_get_dqrr_idx(dq);
713 	ev->mbuf->seqn = dqrr_index + 1;
714 	DPAA2_PER_LCORE_DQRR_SIZE++;
715 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
716 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
717 }
718 
719 void __rte_hot
720 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
721 				const struct qbman_fd *fd,
722 				const struct qbman_result *dq,
723 				struct dpaa2_queue *rxq,
724 				struct rte_event *ev)
725 {
726 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
727 		DPAA2_FD_PTA_SIZE + 16));
728 
729 	ev->flow_id = rxq->ev.flow_id;
730 	ev->sub_event_type = rxq->ev.sub_event_type;
731 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
732 	ev->op = RTE_EVENT_OP_NEW;
733 	ev->sched_type = rxq->ev.sched_type;
734 	ev->queue_id = rxq->ev.queue_id;
735 	ev->priority = rxq->ev.priority;
736 
737 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
738 
739 	ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
740 	ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
741 	ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
742 
743 	qbman_swp_dqrr_consume(swp, dq);
744 }
745 
746 uint16_t
747 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
748 {
749 	/* Function to receive frames for a given device and VQ */
750 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
751 	struct qbman_result *dq_storage;
752 	uint32_t fqid = dpaa2_q->fqid;
753 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
754 	uint8_t pending, status;
755 	struct qbman_swp *swp;
756 	const struct qbman_fd *fd;
757 	struct qbman_pull_desc pulldesc;
758 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
759 
760 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
761 		ret = dpaa2_affine_qbman_swp();
762 		if (ret) {
763 			DPAA2_PMD_ERR(
764 				"Failed to allocate IO portal, tid: %d\n",
765 				rte_gettid());
766 			return 0;
767 		}
768 	}
769 	swp = DPAA2_PER_LCORE_PORTAL;
770 
771 	do {
772 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
773 		qbman_pull_desc_clear(&pulldesc);
774 		qbman_pull_desc_set_fq(&pulldesc, fqid);
775 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
776 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
777 
778 		if (next_pull > dpaa2_dqrr_size) {
779 			qbman_pull_desc_set_numframes(&pulldesc,
780 				dpaa2_dqrr_size);
781 			next_pull -= dpaa2_dqrr_size;
782 		} else {
783 			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
784 			next_pull = 0;
785 		}
786 
787 		while (1) {
788 			if (qbman_swp_pull(swp, &pulldesc)) {
789 				DPAA2_PMD_DP_DEBUG(
790 					"VDQ command is not issued.QBMAN is busy\n");
791 				/* Portal was busy, try again */
792 				continue;
793 			}
794 			break;
795 		}
796 
797 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
798 		/* Check if the previously issued command is completed. */
799 		while (!qbman_check_command_complete(dq_storage))
800 			;
801 
802 		num_pulled = 0;
803 		pending = 1;
804 		do {
805 			/* Loop until the dq_storage is updated with
806 			 * a new token by QBMAN.
807 			 */
808 			while (!qbman_check_new_result(dq_storage))
809 				;
810 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
811 			/* Check whether the last pull command has expired and
812 			 * set the condition for loop termination.
813 			 */
814 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
815 				pending = 0;
816 				/* Check for valid frame. */
817 				status = qbman_result_DQ_flags(dq_storage);
818 				if (unlikely((status &
819 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
820 					continue;
821 			}
822 			fd = qbman_result_DQ_fd(dq_storage);
823 
824 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
825 			if (dpaa2_svr_family != SVR_LX2160A) {
826 				const struct qbman_fd *next_fd =
827 					qbman_result_DQ_fd(dq_storage + 1);
828 
829 				/* Prefetch Annotation address for the parse
830 				 * results.
831 				 */
832 				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
833 					DPAA2_GET_FD_ADDR(next_fd) +
834 					DPAA2_FD_PTA_SIZE + 16)));
835 			}
836 #endif
837 
838 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
839 				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
840 							eth_data->port_id);
841 			else
842 				bufs[num_rx] = eth_fd_to_mbuf(fd,
843 							eth_data->port_id);
844 
845 			if (eth_data->dev_conf.rxmode.offloads &
846 					DEV_RX_OFFLOAD_VLAN_STRIP) {
847 				rte_vlan_strip(bufs[num_rx]);
848 			}
849 
850 			dq_storage++;
851 			num_rx++;
852 			num_pulled++;
853 		} while (pending);
854 	/* Last VDQ provided all packets and more packets are requested */
855 	} while (next_pull && num_pulled == dpaa2_dqrr_size);
856 
857 	dpaa2_q->rx_pkts += num_rx;
858 
859 	return num_rx;
860 }
861 
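/*
 * Drain the Tx confirmation queue: pull confirmation frames and release
 * their buffers back to the hardware buffer pool. With IEEE1588 enabled,
 * the Tx timestamp is also read from the frame annotation.
 */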
862 uint16_t dpaa2_dev_tx_conf(void *queue)
863 {
864 	/* Function to receive Tx confirmation frames for a given device and VQ */
865 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
866 	struct qbman_result *dq_storage;
867 	uint32_t fqid = dpaa2_q->fqid;
868 	int ret, num_tx_conf = 0, num_pulled;
869 	uint8_t pending, status;
870 	struct qbman_swp *swp;
871 	const struct qbman_fd *fd, *next_fd;
872 	struct qbman_pull_desc pulldesc;
873 	struct qbman_release_desc releasedesc;
874 	uint32_t bpid;
875 	uint64_t buf;
876 #if defined(RTE_LIBRTE_IEEE1588)
877 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
878 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
879 	struct dpaa2_annot_hdr *annotation;
880 #endif
881 
882 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
883 		ret = dpaa2_affine_qbman_swp();
884 		if (ret) {
885 			DPAA2_PMD_ERR(
886 				"Failed to allocate IO portal, tid: %d\n",
887 				rte_gettid());
888 			return 0;
889 		}
890 	}
891 	swp = DPAA2_PER_LCORE_PORTAL;
892 
893 	do {
894 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
895 		qbman_pull_desc_clear(&pulldesc);
896 		qbman_pull_desc_set_fq(&pulldesc, fqid);
897 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
898 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
899 
900 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
901 
902 		while (1) {
903 			if (qbman_swp_pull(swp, &pulldesc)) {
904 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
905 						   "QBMAN is busy\n");
906 				/* Portal was busy, try again */
907 				continue;
908 			}
909 			break;
910 		}
911 
912 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
913 		/* Check if the previously issued command is completed. */
914 		while (!qbman_check_command_complete(dq_storage))
915 			;
916 
917 		num_pulled = 0;
918 		pending = 1;
919 		do {
920 			/* Loop until the dq_storage is updated with
921 			 * a new token by QBMAN.
922 			 */
923 			while (!qbman_check_new_result(dq_storage))
924 				;
925 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
926 			/* Check whether the last pull command has expired and
927 			 * set the condition for loop termination.
928 			 */
929 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
930 				pending = 0;
931 				/* Check for valid frame. */
932 				status = qbman_result_DQ_flags(dq_storage);
933 				if (unlikely((status &
934 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
935 					continue;
936 			}
937 			fd = qbman_result_DQ_fd(dq_storage);
938 
939 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
940 			/* Prefetch Annotation address for the parse results */
941 			rte_prefetch0((void *)(size_t)
942 				(DPAA2_GET_FD_ADDR(next_fd) +
943 				 DPAA2_FD_PTA_SIZE + 16));
944 
945 			bpid = DPAA2_GET_FD_BPID(fd);
946 
947 			/* Create a release descriptor required for releasing
948 			 * buffers into QBMAN
949 			 */
950 			qbman_release_desc_clear(&releasedesc);
951 			qbman_release_desc_set_bpid(&releasedesc, bpid);
952 
953 			buf = DPAA2_GET_FD_ADDR(fd);
954 			/* feed them to bman */
955 			do {
956 				ret = qbman_swp_release(swp, &releasedesc,
957 							&buf, 1);
958 			} while (ret == -EBUSY);
959 
960 			dq_storage++;
961 			num_tx_conf++;
962 			num_pulled++;
963 #if defined(RTE_LIBRTE_IEEE1588)
964 			annotation = (struct dpaa2_annot_hdr *)((size_t)
965 				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
966 				DPAA2_FD_PTA_SIZE);
967 			priv->tx_timestamp = annotation->word2;
968 #endif
969 		} while (pending);
970 
971 	/* Last VDQ provided all packets and more packets are requested */
972 	} while (num_pulled == dpaa2_dqrr_size);
973 
974 	dpaa2_q->rx_pkts += num_tx_conf;
975 
976 	return num_tx_conf;
977 }
978 
979 /* Configure the egress frame annotation for timestamp update */
980 static void enable_tx_tstamp(struct qbman_fd *fd)
981 {
982 	struct dpaa2_faead *fd_faead;
983 
984 	/* Set frame annotation status field as valid */
985 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
986 
987 	/* Set frame annotation egress action descriptor as valid */
988 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
989 
990 	/* Set Annotation Length as 128B */
991 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
992 
993 	/* enable update of confirmation frame annotation */
994 	fd_faead = (struct dpaa2_faead *)((size_t)
995 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
996 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
997 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
998 				DPAA2_ANNOT_FAEAD_UPD;
999 }
1000 
1001 /*
1002  * Callback to handle sending packets through WRIOP based interface
1003  */
1004 uint16_t
1005 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1006 {
1007 	/* Function to transmit the frames to the given device and VQ */
1008 	uint32_t loop, retry_count;
1009 	int32_t ret;
1010 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1011 	struct rte_mbuf *mi;
1012 	uint32_t frames_to_send;
1013 	struct rte_mempool *mp;
1014 	struct qbman_eq_desc eqdesc;
1015 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1016 	struct qbman_swp *swp;
1017 	uint16_t num_tx = 0;
1018 	uint16_t bpid;
1019 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1020 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1021 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1022 
1023 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1024 		ret = dpaa2_affine_qbman_swp();
1025 		if (ret) {
1026 			DPAA2_PMD_ERR(
1027 				"Failed to allocate IO portal, tid: %d\n",
1028 				rte_gettid());
1029 			return 0;
1030 		}
1031 	}
1032 	swp = DPAA2_PER_LCORE_PORTAL;
1033 
1034 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1035 			eth_data, dpaa2_q->fqid);
1036 
1037 #ifdef RTE_LIBRTE_IEEE1588
1038 	/* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1039 	 * corresponding to the last packet transmitted, for reading
1040 	 * the timestamp.
1041 	 */
1042 	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1043 	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1044 #endif
1045 
1046 	/*Prepare enqueue descriptor*/
1047 	qbman_eq_desc_clear(&eqdesc);
1048 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1049 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1050 
1051 	/*Clear the unused FD fields before sending*/
1052 	while (nb_pkts) {
1053 		/*Check if the queue is congested*/
1054 		retry_count = 0;
1055 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1056 			retry_count++;
1057 			/* Retry for some time before giving up */
1058 			if (retry_count > CONG_RETRY_COUNT)
1059 				goto skip_tx;
1060 		}
1061 
1062 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1063 			dpaa2_eqcr_size : nb_pkts;
1064 
1065 		for (loop = 0; loop < frames_to_send; loop++) {
1066 			if ((*bufs)->seqn) {
1067 				uint8_t dqrr_index = (*bufs)->seqn - 1;
1068 
1069 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1070 						dqrr_index;
1071 				DPAA2_PER_LCORE_DQRR_SIZE--;
1072 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1073 				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
1074 			}
1075 
1076 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1077 				mp = (*bufs)->pool;
1078 				/* Check the basic scenario and set
1079 				 * the FD appropriately here itself.
1080 				 */
1081 				if (likely(mp && mp->ops_index ==
1082 				    priv->bp_list->dpaa2_ops_index &&
1083 				    (*bufs)->nb_segs == 1 &&
1084 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1085 					if (unlikely(((*bufs)->ol_flags
1086 						& PKT_TX_VLAN_PKT) ||
1087 						(eth_data->dev_conf.txmode.offloads
1088 						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
1089 						ret = rte_vlan_insert(bufs);
1090 						if (ret)
1091 							goto send_n_return;
1092 					}
1093 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1094 					&fd_arr[loop], mempool_to_bpid(mp));
1095 					bufs++;
1096 #ifdef RTE_LIBRTE_IEEE1588
1097 					enable_tx_tstamp(&fd_arr[loop]);
1098 #endif
1099 					continue;
1100 				}
1101 			} else {
1102 				mi = rte_mbuf_from_indirect(*bufs);
1103 				mp = mi->pool;
1104 			}
1105 			/* Not a hw_pkt pool allocated frame */
1106 			if (unlikely(!mp || !priv->bp_list)) {
1107 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1108 				goto send_n_return;
1109 			}
1110 
1111 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
1112 				(eth_data->dev_conf.txmode.offloads
1113 				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
1114 				int ret = rte_vlan_insert(bufs);
1115 				if (ret)
1116 					goto send_n_return;
1117 			}
1118 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1119 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1120 				/* alloc should be from the default buffer pool
1121 				 * attached to this interface
1122 				 */
1123 				bpid = priv->bp_list->buf_pool.bpid;
1124 
1125 				if (unlikely((*bufs)->nb_segs > 1)) {
1126 					DPAA2_PMD_ERR("S/G support not added"
1127 						" for non hw offload buffer");
1128 					goto send_n_return;
1129 				}
1130 				if (eth_copy_mbuf_to_fd(*bufs,
1131 							&fd_arr[loop], bpid)) {
1132 					goto send_n_return;
1133 				}
1134 				/* free the original packet */
1135 				rte_pktmbuf_free(*bufs);
1136 			} else {
1137 				bpid = mempool_to_bpid(mp);
1138 				if (unlikely((*bufs)->nb_segs > 1)) {
1139 					if (eth_mbuf_to_sg_fd(*bufs,
1140 							&fd_arr[loop], bpid))
1141 						goto send_n_return;
1142 				} else {
1143 					eth_mbuf_to_fd(*bufs,
1144 						       &fd_arr[loop], bpid);
1145 				}
1146 			}
1147 #ifdef RTE_LIBRTE_IEEE1588
1148 			enable_tx_tstamp(&fd_arr[loop]);
1149 #endif
1150 			bufs++;
1151 		}
1152 
1153 		loop = 0;
1154 		retry_count = 0;
1155 		while (loop < frames_to_send) {
1156 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1157 					&fd_arr[loop], &flags[loop],
1158 					frames_to_send - loop);
1159 			if (unlikely(ret < 0)) {
1160 				retry_count++;
1161 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1162 					num_tx += loop;
1163 					nb_pkts -= loop;
1164 					goto send_n_return;
1165 				}
1166 			} else {
1167 				loop += ret;
1168 				retry_count = 0;
1169 			}
1170 		}
1171 
1172 		num_tx += loop;
1173 		nb_pkts -= loop;
1174 	}
1175 	dpaa2_q->tx_pkts += num_tx;
1176 	return num_tx;
1177 
1178 send_n_return:
1179 	/* send any already prepared fd */
1180 	if (loop) {
1181 		unsigned int i = 0;
1182 
1183 		retry_count = 0;
1184 		while (i < loop) {
1185 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1186 							 &fd_arr[i],
1187 							 &flags[i],
1188 							 loop - i);
1189 			if (unlikely(ret < 0)) {
1190 				retry_count++;
1191 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1192 					break;
1193 			} else {
1194 				i += ret;
1195 				retry_count = 0;
1196 			}
1197 		}
1198 		num_tx += i;
1199 	}
1200 skip_tx:
1201 	dpaa2_q->tx_pkts += num_tx;
1202 	return num_tx;
1203 }
1204 
1205 void
1206 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1207 {
1208 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1209 	struct qbman_fd *fd;
1210 	struct rte_mbuf *m;
1211 
1212 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1213 
1214 	/* The port id set here does not matter, as we are going to free the mbuf */
1215 	m = eth_fd_to_mbuf(fd, 0);
1216 	rte_pktmbuf_free(m);
1217 }
1218 
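/*
 * Prepare an enqueue descriptor for a packet received on an ordered or
 * atomic event queue: restore the ORP id and sequence number carried in
 * mbuf->seqn (ordered), or set DCA on the held DQRR entry (atomic).
 */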
1219 static void
1220 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1221 			     struct rte_mbuf *m,
1222 			     struct qbman_eq_desc *eqdesc)
1223 {
1224 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1225 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1226 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1227 	struct eqresp_metadata *eqresp_meta;
1228 	uint16_t orpid, seqnum;
1229 	uint8_t dq_idx;
1230 
1231 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1232 
1233 	if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
1234 		orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
1235 			DPAA2_EQCR_OPRID_SHIFT;
1236 		seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
1237 			DPAA2_EQCR_SEQNUM_SHIFT;
1238 
1239 		if (!priv->en_loose_ordered) {
1240 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1241 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1242 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1243 				dpio_dev->eqresp_pi]), 1);
1244 			qbman_eq_desc_set_token(eqdesc, 1);
1245 
1246 			eqresp_meta = &dpio_dev->eqresp_meta[
1247 				dpio_dev->eqresp_pi];
1248 			eqresp_meta->dpaa2_q = dpaa2_q;
1249 			eqresp_meta->mp = m->pool;
1250 
1251 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1252 				dpio_dev->eqresp_pi++ :
1253 				(dpio_dev->eqresp_pi = 0);
1254 		} else {
1255 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1256 		}
1257 	} else {
1258 		dq_idx = m->seqn - 1;
1259 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1260 		DPAA2_PER_LCORE_DQRR_SIZE--;
1261 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1262 	}
1263 	m->seqn = DPAA2_INVALID_MBUF_SEQN;
1264 }
1265 
1266 /* Callback to handle sending ordered packets through WRIOP based interface */
1267 uint16_t
1268 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1269 {
1270 	/* Function to transmit the frames to the given device and VQ */
1271 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1272 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1273 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1274 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1275 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1276 	struct rte_mbuf *mi;
1277 	struct rte_mempool *mp;
1278 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1279 	struct qbman_swp *swp;
1280 	uint32_t frames_to_send, num_free_eq_desc;
1281 	uint32_t loop, retry_count;
1282 	int32_t ret;
1283 	uint16_t num_tx = 0;
1284 	uint16_t bpid;
1285 
1286 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1287 		ret = dpaa2_affine_qbman_swp();
1288 		if (ret) {
1289 			DPAA2_PMD_ERR(
1290 				"Failed to allocate IO portal, tid: %d\n",
1291 				rte_gettid());
1292 			return 0;
1293 		}
1294 	}
1295 	swp = DPAA2_PER_LCORE_PORTAL;
1296 
1297 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1298 			   eth_data, dpaa2_q->fqid);
1299 
1300 	/* This would also handle normal and atomic queues as any type
1301 	 * of packet can be enqueued when ordered queues are being used.
1302 	 */
1303 	while (nb_pkts) {
1304 		/*Check if the queue is congested*/
1305 		retry_count = 0;
1306 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1307 			retry_count++;
1308 			/* Retry for some time before giving up */
1309 			if (retry_count > CONG_RETRY_COUNT)
1310 				goto skip_tx;
1311 		}
1312 
1313 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1314 			dpaa2_eqcr_size : nb_pkts;
1315 
1316 		if (!priv->en_loose_ordered) {
1317 			if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
1318 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1319 				if (num_free_eq_desc < frames_to_send)
1320 					frames_to_send = num_free_eq_desc;
1321 			}
1322 		}
1323 
1324 		for (loop = 0; loop < frames_to_send; loop++) {
1325 			/*Prepare enqueue descriptor*/
1326 			qbman_eq_desc_clear(&eqdesc[loop]);
1327 
1328 			if ((*bufs)->seqn) {
1329 				/* Use only queue 0 for Tx in case of atomic/
1330 				 * ordered packets, as packets can get out of order
1331 				 * when being transmitted out from the interface.
1332 				 */
1333 				dpaa2_set_enqueue_descriptor(order_sendq,
1334 							     (*bufs),
1335 							     &eqdesc[loop]);
1336 			} else {
1337 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1338 							 DPAA2_EQ_RESP_ERR_FQ);
1339 				qbman_eq_desc_set_fq(&eqdesc[loop],
1340 						     dpaa2_q->fqid);
1341 			}
1342 
1343 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1344 				mp = (*bufs)->pool;
1345 				/* Check the basic scenario and set
1346 				 * the FD appropriately here itself.
1347 				 */
1348 				if (likely(mp && mp->ops_index ==
1349 				    priv->bp_list->dpaa2_ops_index &&
1350 				    (*bufs)->nb_segs == 1 &&
1351 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1352 					if (unlikely((*bufs)->ol_flags
1353 						& PKT_TX_VLAN_PKT)) {
1354 					  ret = rte_vlan_insert(bufs);
1355 					  if (ret)
1356 						goto send_n_return;
1357 					}
1358 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1359 						&fd_arr[loop],
1360 						mempool_to_bpid(mp));
1361 					bufs++;
1362 					continue;
1363 				}
1364 			} else {
1365 				mi = rte_mbuf_from_indirect(*bufs);
1366 				mp = mi->pool;
1367 			}
1368 			/* Not a hw_pkt pool allocated frame */
1369 			if (unlikely(!mp || !priv->bp_list)) {
1370 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1371 				goto send_n_return;
1372 			}
1373 
1374 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1375 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1376 				/* alloc should be from the default buffer pool
1377 				 * attached to this interface
1378 				 */
1379 				bpid = priv->bp_list->buf_pool.bpid;
1380 
1381 				if (unlikely((*bufs)->nb_segs > 1)) {
1382 					DPAA2_PMD_ERR(
1383 						"S/G not supp for non hw offload buffer");
1384 					goto send_n_return;
1385 				}
1386 				if (eth_copy_mbuf_to_fd(*bufs,
1387 							&fd_arr[loop], bpid)) {
1388 					goto send_n_return;
1389 				}
1390 				/* free the original packet */
1391 				rte_pktmbuf_free(*bufs);
1392 			} else {
1393 				bpid = mempool_to_bpid(mp);
1394 				if (unlikely((*bufs)->nb_segs > 1)) {
1395 					if (eth_mbuf_to_sg_fd(*bufs,
1396 							      &fd_arr[loop],
1397 							      bpid))
1398 						goto send_n_return;
1399 				} else {
1400 					eth_mbuf_to_fd(*bufs,
1401 						       &fd_arr[loop], bpid);
1402 				}
1403 			}
1404 			bufs++;
1405 		}
1406 
1407 		loop = 0;
1408 		retry_count = 0;
1409 		while (loop < frames_to_send) {
1410 			ret = qbman_swp_enqueue_multiple_desc(swp,
1411 					&eqdesc[loop], &fd_arr[loop],
1412 					frames_to_send - loop);
1413 			if (unlikely(ret < 0)) {
1414 				retry_count++;
1415 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1416 					num_tx += loop;
1417 					nb_pkts -= loop;
1418 					goto send_n_return;
1419 				}
1420 			} else {
1421 				loop += ret;
1422 				retry_count = 0;
1423 			}
1424 		}
1425 
1426 		num_tx += loop;
1427 		nb_pkts -= loop;
1428 	}
1429 	dpaa2_q->tx_pkts += num_tx;
1430 	return num_tx;
1431 
1432 send_n_return:
1433 	/* send any already prepared fd */
1434 	if (loop) {
1435 		unsigned int i = 0;
1436 
1437 		retry_count = 0;
1438 		while (i < loop) {
1439 			ret = qbman_swp_enqueue_multiple_desc(swp,
1440 				       &eqdesc[i], &fd_arr[i], loop - i);
1441 			if (unlikely(ret < 0)) {
1442 				retry_count++;
1443 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1444 					break;
1445 			} else {
1446 				i += ret;
1447 				retry_count = 0;
1448 			}
1449 		}
1450 		num_tx += i;
1451 	}
1452 skip_tx:
1453 	dpaa2_q->tx_pkts += num_tx;
1454 	return num_tx;
1455 }
1456 
1457 /**
1458  * Dummy DPDK callback for TX.
1459  *
1460  * This function is used to temporarily replace the real callback during
1461  * unsafe control operations on the queue, or in case of error.
1462  *
1463  * @param queue
1464  *   Generic pointer to TX queue structure.
1465  * @param[in] bufs
1466  *   Packets to transmit.
1467  * @param nb_pkts
1468  *   Number of packets in array.
1469  *
1470  * @return
1471  *   Number of packets successfully transmitted (<= nb_pkts).
1472  */
1473 uint16_t
1474 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1475 {
1476 	(void)queue;
1477 	(void)bufs;
1478 	(void)nb_pkts;
1479 	return 0;
1480 }
1481 
1482 #if defined(RTE_TOOLCHAIN_GCC)
1483 #pragma GCC diagnostic push
1484 #pragma GCC diagnostic ignored "-Wcast-qual"
1485 #elif defined(RTE_TOOLCHAIN_CLANG)
1486 #pragma clang diagnostic push
1487 #pragma clang diagnostic ignored "-Wcast-qual"
1488 #endif
1489 
1490 /* This function loops back all the received packets. */
1491 uint16_t
1492 dpaa2_dev_loopback_rx(void *queue,
1493 		      struct rte_mbuf **bufs __rte_unused,
1494 		      uint16_t nb_pkts)
1495 {
1496 	/* Function to receive frames for a given device and VQ */
1497 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1498 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1499 	uint32_t fqid = dpaa2_q->fqid;
1500 	int ret, num_rx = 0, num_tx = 0, pull_size;
1501 	uint8_t pending, status;
1502 	struct qbman_swp *swp;
1503 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1504 	struct qbman_pull_desc pulldesc;
1505 	struct qbman_eq_desc eqdesc;
1506 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1507 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1508 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1509 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1510 	/* TODO: currently we use only the 1st Tx queue for loopback */
1511 
1512 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1513 		ret = dpaa2_affine_qbman_ethrx_swp();
1514 		if (ret) {
1515 			DPAA2_PMD_ERR("Failure in affining portal");
1516 			return 0;
1517 		}
1518 	}
1519 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1520 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1521 	if (unlikely(!q_storage->active_dqs)) {
1522 		q_storage->toggle = 0;
1523 		dq_storage = q_storage->dq_storage[q_storage->toggle];
1524 		q_storage->last_num_pkts = pull_size;
1525 		qbman_pull_desc_clear(&pulldesc);
1526 		qbman_pull_desc_set_numframes(&pulldesc,
1527 					      q_storage->last_num_pkts);
1528 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1529 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1530 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1531 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1532 			while (!qbman_check_command_complete(
1533 			       get_swp_active_dqs(
1534 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1535 				;
1536 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1537 		}
1538 		while (1) {
1539 			if (qbman_swp_pull(swp, &pulldesc)) {
1540 				DPAA2_PMD_DP_DEBUG(
1541 					"VDQ command not issued.QBMAN busy\n");
1542 				/* Portal was busy, try again */
1543 				continue;
1544 			}
1545 			break;
1546 		}
1547 		q_storage->active_dqs = dq_storage;
1548 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1549 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1550 				   dq_storage);
1551 	}
1552 
1553 	dq_storage = q_storage->active_dqs;
1554 	rte_prefetch0((void *)(size_t)(dq_storage));
1555 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
1556 
1557 	/* Prepare the next pull descriptor. This gives room for the
1558 	 * prefetching done on the DQRR entries.
1559 	 */
1560 	q_storage->toggle ^= 1;
1561 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1562 	qbman_pull_desc_clear(&pulldesc);
1563 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1564 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1565 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1566 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1567 
1568 	/*Prepare enqueue descriptor*/
1569 	qbman_eq_desc_clear(&eqdesc);
1570 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1571 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1572 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1573 
1574 	/* Check if the previously issued command is completed.
1575 	 * Also, it seems the SWP is shared between the Ethernet driver
1576 	 * and the SEC driver.
1577 	 */
1578 	while (!qbman_check_command_complete(dq_storage))
1579 		;
1580 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1581 		clear_swp_active_dqs(q_storage->active_dpio_id);
1582 
1583 	pending = 1;
1584 
1585 	do {
1586 		/* Loop until the dq_storage is updated with
1587 		 * a new token by QBMAN.
1588 		 */
1589 		while (!qbman_check_new_result(dq_storage))
1590 			;
1591 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1592 		/* Check whether the last pull command has expired and
1593 		 * set the condition for loop termination.
1594 		 */
1595 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1596 			pending = 0;
1597 			/* Check for valid frame. */
1598 			status = qbman_result_DQ_flags(dq_storage);
1599 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1600 				continue;
1601 		}
1602 		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1603 
1604 		dq_storage++;
1605 		num_rx++;
1606 	} while (pending);
1607 
1608 	while (num_tx < num_rx) {
1609 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1610 				&fd[num_tx], 0, num_rx - num_tx);
1611 	}
1612 
1613 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1614 		while (!qbman_check_command_complete(
1615 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1616 			;
1617 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1618 	}
1619 	/* issue a volatile dequeue command for next pull */
1620 	while (1) {
1621 		if (qbman_swp_pull(swp, &pulldesc)) {
1622 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1623 					  "QBMAN is busy (2)\n");
1624 			continue;
1625 		}
1626 		break;
1627 	}
1628 	q_storage->active_dqs = dq_storage1;
1629 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1630 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1631 
1632 	dpaa2_q->rx_pkts += num_rx;
1633 	dpaa2_q->tx_pkts += num_tx;
1634 
1635 	return 0;
1636 }
1637 #if defined(RTE_TOOLCHAIN_GCC)
1638 #pragma GCC diagnostic pop
1639 #elif defined(RTE_TOOLCHAIN_CLANG)
1640 #pragma clang diagnostic pop
1641 #endif
1642