xref: /dpdk/drivers/net/dpaa2/dpaa2_rxtx.c (revision e88bd4746737a1ca464b866d29f20ff5a739cd3f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2020 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 
11 #include <rte_mbuf.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_dev.h>
17 
18 #include <rte_fslmc.h>
19 #include <fslmc_vfio.h>
20 #include <dpaa2_hw_pvt.h>
21 #include <dpaa2_hw_dpio.h>
22 #include <dpaa2_hw_mempool.h>
23 
24 #include "dpaa2_pmd_logs.h"
25 #include "dpaa2_ethdev.h"
26 #include "base/dpaa2_hw_dpni_annot.h"
27 
28 static inline uint32_t __rte_hot
29 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
30 			struct dpaa2_annot_hdr *annotation);
31 
32 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
33 
34 static inline rte_mbuf_timestamp_t *
35 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
36 {
37 	return RTE_MBUF_DYNFIELD(mbuf,
38 		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
39 }
40 
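/* Populate a frame descriptor from a single-segment (contiguous) mbuf:
 * set the buffer IOVA, data length, buffer-pool id and data offset, and
 * clear the FRC, CTRL and FLC fields so no stale state is carried into
 * the enqueue.
 */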
41 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
42 	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
43 	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
44 	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
45 	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
46 	DPAA2_SET_FD_FRC(_fd, 0);		\
47 	DPAA2_RESET_FD_CTRL(_fd);		\
48 	DPAA2_RESET_FD_FLC(_fd);		\
49 } while (0)
50 
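/* Fast Rx parse path used on LX2160A: the parse summary is taken from the
 * FRC field of the FD and mapped directly to an mbuf packet type, with
 * unrecognized FRC values falling back to the slow annotation-based parser.
 * The RSS hash is taken from the FLC field and the Rx timestamp from
 * annotation word2 when timestamping is enabled for the port.
 */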
51 static inline void __rte_hot
52 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
53 		       void *hw_annot_addr)
54 {
55 	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
56 	struct dpaa2_annot_hdr *annotation =
57 			(struct dpaa2_annot_hdr *)hw_annot_addr;
58 
59 	m->packet_type = RTE_PTYPE_UNKNOWN;
60 	switch (frc) {
61 	case DPAA2_PKT_TYPE_ETHER:
62 		m->packet_type = RTE_PTYPE_L2_ETHER;
63 		break;
64 	case DPAA2_PKT_TYPE_IPV4:
65 		m->packet_type = RTE_PTYPE_L2_ETHER |
66 			RTE_PTYPE_L3_IPV4;
67 		break;
68 	case DPAA2_PKT_TYPE_IPV6:
69 		m->packet_type = RTE_PTYPE_L2_ETHER |
70 			RTE_PTYPE_L3_IPV6;
71 		break;
72 	case DPAA2_PKT_TYPE_IPV4_EXT:
73 		m->packet_type = RTE_PTYPE_L2_ETHER |
74 			RTE_PTYPE_L3_IPV4_EXT;
75 		break;
76 	case DPAA2_PKT_TYPE_IPV6_EXT:
77 		m->packet_type = RTE_PTYPE_L2_ETHER |
78 			RTE_PTYPE_L3_IPV6_EXT;
79 		break;
80 	case DPAA2_PKT_TYPE_IPV4_TCP:
81 		m->packet_type = RTE_PTYPE_L2_ETHER |
82 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
83 		break;
84 	case DPAA2_PKT_TYPE_IPV6_TCP:
85 		m->packet_type = RTE_PTYPE_L2_ETHER |
86 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
87 		break;
88 	case DPAA2_PKT_TYPE_IPV4_UDP:
89 		m->packet_type = RTE_PTYPE_L2_ETHER |
90 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
91 		break;
92 	case DPAA2_PKT_TYPE_IPV6_UDP:
93 		m->packet_type = RTE_PTYPE_L2_ETHER |
94 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
95 		break;
96 	case DPAA2_PKT_TYPE_IPV4_SCTP:
97 		m->packet_type = RTE_PTYPE_L2_ETHER |
98 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
99 		break;
100 	case DPAA2_PKT_TYPE_IPV6_SCTP:
101 		m->packet_type = RTE_PTYPE_L2_ETHER |
102 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
103 		break;
104 	case DPAA2_PKT_TYPE_IPV4_ICMP:
105 		m->packet_type = RTE_PTYPE_L2_ETHER |
106 			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
107 		break;
108 	case DPAA2_PKT_TYPE_IPV6_ICMP:
109 		m->packet_type = RTE_PTYPE_L2_ETHER |
110 			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
111 		break;
112 	default:
113 		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
114 	}
115 	m->hash.rss = fd->simple.flc_hi;
116 	m->ol_flags |= PKT_RX_RSS_HASH;
117 
118 	if (dpaa2_enable_ts[m->port]) {
119 		*dpaa2_timestamp_dynfield(m) = annotation->word2;
120 		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
121 		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
122 				*dpaa2_timestamp_dynfield(m));
123 	}
124 
125 	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
126 		"ol_flags =0x%" PRIx64 "",
127 		frc, m->packet_type, m->ol_flags);
128 }
129 
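/* Slow parse path: walk the hardware annotation words to derive the packet
 * type (VLAN/QinQ, ARP, IPv4/IPv6 with options, fragments and L4 protocol)
 * and to flag L3/L4 checksum errors in ol_flags.
 */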
130 static inline uint32_t __rte_hot
131 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
132 			struct dpaa2_annot_hdr *annotation)
133 {
134 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
135 	uint16_t *vlan_tci;
136 
137 	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
138 			"(4)=0x%" PRIx64 "\t",
139 			annotation->word3, annotation->word4);
140 
141 #if defined(RTE_LIBRTE_IEEE1588)
142 	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
143 		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
144 #endif
145 
146 	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
147 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
148 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
149 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
150 		mbuf->ol_flags |= PKT_RX_VLAN;
151 		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
152 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
153 		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
154 			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
155 		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
156 		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
157 		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
158 	}
159 
160 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
161 		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
162 		goto parse_done;
163 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
164 		pkt_type |= RTE_PTYPE_L2_ETHER;
165 	} else {
166 		goto parse_done;
167 	}
168 
169 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
170 			     L3_IPV4_N_PRESENT)) {
171 		pkt_type |= RTE_PTYPE_L3_IPV4;
172 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
173 			L3_IP_N_OPT_PRESENT))
174 			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
175 
176 	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
177 		  L3_IPV6_N_PRESENT)) {
178 		pkt_type |= RTE_PTYPE_L3_IPV6;
179 		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
180 		    L3_IP_N_OPT_PRESENT))
181 			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
182 	} else {
183 		goto parse_done;
184 	}
185 
186 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
187 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
188 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
189 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
190 
191 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
192 	    L3_IP_1_MORE_FRAGMENT |
193 	    L3_IP_N_FIRST_FRAGMENT |
194 	    L3_IP_N_MORE_FRAGMENT)) {
195 		pkt_type |= RTE_PTYPE_L4_FRAG;
196 		goto parse_done;
197 	} else {
198 		pkt_type |= RTE_PTYPE_L4_NONFRAG;
199 	}
200 
201 	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
202 		pkt_type |= RTE_PTYPE_L4_UDP;
203 
204 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
205 		pkt_type |= RTE_PTYPE_L4_TCP;
206 
207 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
208 		pkt_type |= RTE_PTYPE_L4_SCTP;
209 
210 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
211 		pkt_type |= RTE_PTYPE_L4_ICMP;
212 
213 	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
214 		pkt_type |= RTE_PTYPE_UNKNOWN;
215 
216 parse_done:
217 	return pkt_type;
218 }
219 
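/* Default Rx parse path for non-LX2 platforms: set checksum-error and
 * timestamp flags from the annotation, fall back to the slow parser when
 * annotation word3 indicates that detailed parsing is needed, and otherwise
 * map annotation word4 to a few common L3/L4 packet types.
 */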
220 static inline uint32_t __rte_hot
221 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
222 {
223 	struct dpaa2_annot_hdr *annotation =
224 			(struct dpaa2_annot_hdr *)hw_annot_addr;
225 
226 	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
227 			   annotation->word4);
228 
229 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
230 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
231 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
232 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
233 
234 	if (dpaa2_enable_ts[mbuf->port]) {
235 		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
236 		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
237 		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
238 				*dpaa2_timestamp_dynfield(mbuf));
239 	}
240 
241 	/* Check detailed parsing requirement */
242 	if (annotation->word3 & 0x7FFFFC3FFFF)
243 		return dpaa2_dev_rx_parse_slow(mbuf, annotation);
244 
245 	/* Return some common types from parse processing */
246 	switch (annotation->word4) {
247 	case DPAA2_L3_IPv4:
248 		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
249 	case DPAA2_L3_IPv6:
250 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
251 	case DPAA2_L3_IPv4_TCP:
252 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
253 				RTE_PTYPE_L4_TCP;
254 	case DPAA2_L3_IPv4_UDP:
255 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
256 				RTE_PTYPE_L4_UDP;
257 	case DPAA2_L3_IPv6_TCP:
258 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
259 				RTE_PTYPE_L4_TCP;
260 	case DPAA2_L3_IPv6_UDP:
261 		return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
262 				RTE_PTYPE_L4_UDP;
263 	default:
264 		break;
265 	}
266 
267 	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
268 }
269 
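/* Convert a scatter/gather formatted FD into a chained mbuf. Each SG entry
 * maps to one segment rebuilt in place from its backing buffer; the buffer
 * that held the SG table itself is released at the end.
 */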
270 static inline struct rte_mbuf *__rte_hot
271 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
272 		  int port_id)
273 {
274 	struct qbman_sge *sgt, *sge;
275 	size_t sg_addr, fd_addr;
276 	int i = 0;
277 	void *hw_annot_addr;
278 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
279 
280 	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
281 	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
282 
283 	/* Get Scatter gather table address */
284 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
285 
286 	sge = &sgt[i++];
287 	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
288 
289 	/* First Scatter gather entry */
290 	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
291 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
292 	/* Prepare all the metadata for first segment */
293 	first_seg->buf_addr = (uint8_t *)sg_addr;
294 	first_seg->ol_flags = 0;
295 	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
296 	first_seg->data_len = sge->length  & 0x1FFFF;
297 	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
298 	first_seg->nb_segs = 1;
299 	first_seg->next = NULL;
300 	first_seg->port = port_id;
301 	if (dpaa2_svr_family == SVR_LX2160A)
302 		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
303 	else
304 		first_seg->packet_type =
305 			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
306 
307 	rte_mbuf_refcnt_set(first_seg, 1);
308 	cur_seg = first_seg;
309 	while (!DPAA2_SG_IS_FINAL(sge)) {
310 		sge = &sgt[i++];
311 		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
312 				DPAA2_GET_FLE_ADDR(sge));
313 		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
314 			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
315 		next_seg->buf_addr  = (uint8_t *)sg_addr;
316 		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
317 		next_seg->data_len  = sge->length  & 0x1FFFF;
318 		first_seg->nb_segs += 1;
319 		rte_mbuf_refcnt_set(next_seg, 1);
320 		cur_seg->next = next_seg;
321 		next_seg->next = NULL;
322 		cur_seg = next_seg;
323 	}
324 	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
325 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
326 	rte_mbuf_refcnt_set(temp, 1);
327 	rte_pktmbuf_free_seg(temp);
328 
329 	return (void *)first_seg;
330 }
331 
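/* Convert a contiguous FD into an mbuf rebuilt in place over the frame
 * buffer; only the fields that may have changed since the buffer was last
 * used are repopulated before the parse results are applied.
 */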
332 static inline struct rte_mbuf *__rte_hot
333 eth_fd_to_mbuf(const struct qbman_fd *fd,
334 	       int port_id)
335 {
336 	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
337 	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
338 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
339 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
340 
341 	/* Need to repopulate some of the fields,
342 	 * as they may have changed during the last transmission
343 	 */
344 	mbuf->nb_segs = 1;
345 	mbuf->ol_flags = 0;
346 	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
347 	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
348 	mbuf->pkt_len = mbuf->data_len;
349 	mbuf->port = port_id;
350 	mbuf->next = NULL;
351 	rte_mbuf_refcnt_set(mbuf, 1);
352 
353 	/* Parse the packet. */
354 	/* On LX2, the parse results are in the FRC field of the FD.
355 	 * For other DPAA2 platforms, the parse results follow
356 	 * the private SW annotation area.
357 	 */
358 
359 	if (dpaa2_svr_family == SVR_LX2160A)
360 		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
361 	else
362 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
363 
364 	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
365 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
366 		mbuf, mbuf->buf_addr, mbuf->data_off,
367 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
368 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
369 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
370 
371 	return mbuf;
372 }
373 
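/* Build a scatter/gather FD for a multi-segment mbuf. The SG table is
 * written into a buffer allocated from the mbuf's own pool; for each
 * segment either the buffer-pool id is set so hardware can free it, or the
 * invalid-bpid bit is set (with the refcount adjusted) when the segment
 * must not be freed by hardware.
 */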
374 static int __rte_noinline __rte_hot
375 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
376 		  struct qbman_fd *fd, uint16_t bpid)
377 {
378 	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
379 	struct qbman_sge *sgt, *sge = NULL;
380 	int i;
381 
382 	temp = rte_pktmbuf_alloc(mbuf->pool);
383 	if (temp == NULL) {
384 		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
385 		return -ENOMEM;
386 	}
387 
388 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
389 	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
390 	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
391 	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
392 	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
393 	DPAA2_RESET_FD_FRC(fd);
394 	DPAA2_RESET_FD_CTRL(fd);
395 	/*Set Scatter gather table and Scatter gather entries*/
396 	sgt = (struct qbman_sge *)(
397 			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
398 			+ DPAA2_GET_FD_OFFSET(fd));
399 
400 	for (i = 0; i < mbuf->nb_segs; i++) {
401 		sge = &sgt[i];
402 		/*Resetting the buffer pool id and offset field*/
403 		sge->fin_bpid_offset = 0;
404 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
405 		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
406 		sge->length = cur_seg->data_len;
407 		if (RTE_MBUF_DIRECT(cur_seg)) {
408 			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
409 				/* If refcnt > 1, invalid bpid is set to ensure
410 				 * buffer is not freed by HW
411 				 */
412 				DPAA2_SET_FLE_IVP(sge);
413 				rte_mbuf_refcnt_update(cur_seg, -1);
414 			} else
415 				DPAA2_SET_FLE_BPID(sge,
416 						mempool_to_bpid(cur_seg->pool));
417 			cur_seg = cur_seg->next;
418 		} else {
419 			/* Get owner MBUF from indirect buffer */
420 			mi = rte_mbuf_from_indirect(cur_seg);
421 			if (rte_mbuf_refcnt_read(mi) > 1) {
422 				/* If refcnt > 1, invalid bpid is set to ensure
423 				 * owner buffer is not freed by HW
424 				 */
425 				DPAA2_SET_FLE_IVP(sge);
426 			} else {
427 				DPAA2_SET_FLE_BPID(sge,
428 						   mempool_to_bpid(mi->pool));
429 				rte_mbuf_refcnt_update(mi, 1);
430 			}
431 			prev_seg = cur_seg;
432 			cur_seg = cur_seg->next;
433 			prev_seg->next = NULL;
434 			rte_pktmbuf_free(prev_seg);
435 		}
436 	}
437 	DPAA2_SG_SET_FINAL(sge, true);
438 	return 0;
439 }
440 
441 static void
442 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
443 	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;
444 
445 static void __rte_noinline __rte_hot
446 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
447 	       struct qbman_fd *fd, uint16_t bpid)
448 {
449 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
450 
451 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
452 		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
453 		mbuf, mbuf->buf_addr, mbuf->data_off,
454 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
455 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
456 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
457 	if (RTE_MBUF_DIRECT(mbuf)) {
458 		if (rte_mbuf_refcnt_read(mbuf) > 1) {
459 			DPAA2_SET_FD_IVP(fd);
460 			rte_mbuf_refcnt_update(mbuf, -1);
461 		}
462 	} else {
463 		struct rte_mbuf *mi;
464 
465 		mi = rte_mbuf_from_indirect(mbuf);
466 		if (rte_mbuf_refcnt_read(mi) > 1)
467 			DPAA2_SET_FD_IVP(fd);
468 		else
469 			rte_mbuf_refcnt_update(mi, 1);
470 		rte_pktmbuf_free(mbuf);
471 	}
472 }
473 
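/* Fallback Tx path for mbufs that do not come from a DPAA2-backed mempool:
 * copy the packet data into a buffer allocated from the device's own pool
 * (identified by bpid) and build a contiguous FD over that copy.
 */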
474 static inline int __rte_hot
475 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
476 		    struct qbman_fd *fd, uint16_t bpid)
477 {
478 	struct rte_mbuf *m;
479 	void *mb = NULL;
480 
481 	if (rte_dpaa2_mbuf_alloc_bulk(
482 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
483 		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
484 		return -1;
485 	}
486 	m = (struct rte_mbuf *)mb;
487 	memcpy((char *)m->buf_addr + mbuf->data_off,
488 	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
489 		mbuf->pkt_len);
490 
491 	/* Copy required fields */
492 	m->data_off = mbuf->data_off;
493 	m->ol_flags = mbuf->ol_flags;
494 	m->packet_type = mbuf->packet_type;
495 	m->tx_offload = mbuf->tx_offload;
496 
497 	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
498 
499 	DPAA2_PMD_DP_DEBUG(
500 		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
501 		" meta: %d, off: %d, len: %d\n",
502 		(void *)mbuf,
503 		mbuf->buf_addr,
504 		DPAA2_GET_FD_ADDR(fd),
505 		DPAA2_GET_FD_BPID(fd),
506 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
507 		DPAA2_GET_FD_OFFSET(fd),
508 		DPAA2_GET_FD_LEN(fd));
509 
510 	return 0;
511 }
512 
513 /* This function assumes that the caller keeps the same value of nb_pkts
514  * across calls for a given queue; if that is not the case, better use the
515  * non-prefetch version of the rx call.
516  * It returns the packets as requested in the previous call, without honoring
517  * the current nb_pkts or the available bufs space.
518  */
519 uint16_t
520 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
521 {
522 	/* Function receives frames for a given device and VQ */
523 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
524 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
525 	uint32_t fqid = dpaa2_q->fqid;
526 	int ret, num_rx = 0, pull_size;
527 	uint8_t pending, status;
528 	struct qbman_swp *swp;
529 	const struct qbman_fd *fd;
530 	struct qbman_pull_desc pulldesc;
531 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
532 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
533 #if defined(RTE_LIBRTE_IEEE1588)
534 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
535 #endif
536 
537 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
538 		ret = dpaa2_affine_qbman_ethrx_swp();
539 		if (ret) {
540 			DPAA2_PMD_ERR("Failure in affining portal");
541 			return 0;
542 		}
543 	}
544 
545 	if (unlikely(!rte_dpaa2_bpid_info &&
546 		     rte_eal_process_type() == RTE_PROC_SECONDARY))
547 		rte_dpaa2_bpid_info = dpaa2_q->bp_array;
548 
549 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
550 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
551 	if (unlikely(!q_storage->active_dqs)) {
552 		q_storage->toggle = 0;
553 		dq_storage = q_storage->dq_storage[q_storage->toggle];
554 		q_storage->last_num_pkts = pull_size;
555 		qbman_pull_desc_clear(&pulldesc);
556 		qbman_pull_desc_set_numframes(&pulldesc,
557 					      q_storage->last_num_pkts);
558 		qbman_pull_desc_set_fq(&pulldesc, fqid);
559 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
560 			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
561 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
562 			while (!qbman_check_command_complete(
563 			       get_swp_active_dqs(
564 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
565 				;
566 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
567 		}
568 		while (1) {
569 			if (qbman_swp_pull(swp, &pulldesc)) {
570 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
571 						  " QBMAN is busy (1)\n");
572 				/* Portal was busy, try again */
573 				continue;
574 			}
575 			break;
576 		}
577 		q_storage->active_dqs = dq_storage;
578 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
579 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
580 				   dq_storage);
581 	}
582 
583 	dq_storage = q_storage->active_dqs;
584 	rte_prefetch0((void *)(size_t)(dq_storage));
585 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
586 
587 	/* Prepare next pull descriptor. This will give space for the
588 	 * prefetching done on DQRR entries
589 	 */
590 	q_storage->toggle ^= 1;
591 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
592 	qbman_pull_desc_clear(&pulldesc);
593 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
594 	qbman_pull_desc_set_fq(&pulldesc, fqid);
595 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
596 		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
597 
598 	/* Check if the previously issued command has completed.
599 	 * The SWP also seems to be shared between the Ethernet driver
600 	 * and the SEC driver.
601 	 */
602 	while (!qbman_check_command_complete(dq_storage))
603 		;
604 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
605 		clear_swp_active_dqs(q_storage->active_dpio_id);
606 
607 	pending = 1;
608 
609 	do {
610 		/* Loop until the dq_storage is updated with
611 		 * new token by QBMAN
612 		 */
613 		while (!qbman_check_new_result(dq_storage))
614 			;
615 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
616 		/* Check whether the last pull command has expired and
617 		 * set the condition for loop termination
618 		 */
619 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
620 			pending = 0;
621 			/* Check for valid frame. */
622 			status = qbman_result_DQ_flags(dq_storage);
623 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
624 				continue;
625 		}
626 		fd = qbman_result_DQ_fd(dq_storage);
627 
628 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
629 		if (dpaa2_svr_family != SVR_LX2160A) {
630 			const struct qbman_fd *next_fd =
631 				qbman_result_DQ_fd(dq_storage + 1);
632 			/* Prefetch Annotation address for the parse results */
633 			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
634 				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
635 		}
636 #endif
637 
638 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
639 			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
640 		else
641 			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
642 #if defined(RTE_LIBRTE_IEEE1588)
643 		priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
644 #endif
645 
646 		if (eth_data->dev_conf.rxmode.offloads &
647 				DEV_RX_OFFLOAD_VLAN_STRIP)
648 			rte_vlan_strip(bufs[num_rx]);
649 
650 		dq_storage++;
651 		num_rx++;
652 	} while (pending);
653 
654 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
655 		while (!qbman_check_command_complete(
656 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
657 			;
658 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
659 	}
660 	/* issue a volatile dequeue command for next pull */
661 	while (1) {
662 		if (qbman_swp_pull(swp, &pulldesc)) {
663 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
664 					  " QBMAN is busy (2)\n");
665 			continue;
666 		}
667 		break;
668 	}
669 	q_storage->active_dqs = dq_storage1;
670 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
671 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
672 
673 	dpaa2_q->rx_pkts += num_rx;
674 
675 	return num_rx;
676 }
677 
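/* Eventdev Rx callbacks: translate a dequeue result into an rte_event
 * carrying the reconstructed mbuf. The parallel variant consumes the DQRR
 * entry immediately; the atomic variant holds it, recording the DQRR index
 * in the mbuf sequence number so it can be consumed via DCA at enqueue
 * time; the ordered variant stores the ODP id and sequence number needed
 * for order restoration on enqueue.
 */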
678 void __rte_hot
679 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
680 				 const struct qbman_fd *fd,
681 				 const struct qbman_result *dq,
682 				 struct dpaa2_queue *rxq,
683 				 struct rte_event *ev)
684 {
685 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
686 		DPAA2_FD_PTA_SIZE + 16));
687 
688 	ev->flow_id = rxq->ev.flow_id;
689 	ev->sub_event_type = rxq->ev.sub_event_type;
690 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
691 	ev->op = RTE_EVENT_OP_NEW;
692 	ev->sched_type = rxq->ev.sched_type;
693 	ev->queue_id = rxq->ev.queue_id;
694 	ev->priority = rxq->ev.priority;
695 
696 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
697 
698 	qbman_swp_dqrr_consume(swp, dq);
699 }
700 
701 void __rte_hot
702 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
703 			       const struct qbman_fd *fd,
704 			       const struct qbman_result *dq,
705 			       struct dpaa2_queue *rxq,
706 			       struct rte_event *ev)
707 {
708 	uint8_t dqrr_index;
709 
710 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
711 		DPAA2_FD_PTA_SIZE + 16));
712 
713 	ev->flow_id = rxq->ev.flow_id;
714 	ev->sub_event_type = rxq->ev.sub_event_type;
715 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
716 	ev->op = RTE_EVENT_OP_NEW;
717 	ev->sched_type = rxq->ev.sched_type;
718 	ev->queue_id = rxq->ev.queue_id;
719 	ev->priority = rxq->ev.priority;
720 
721 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
722 
723 	dqrr_index = qbman_get_dqrr_idx(dq);
724 	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
725 	DPAA2_PER_LCORE_DQRR_SIZE++;
726 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
727 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
728 }
729 
730 void __rte_hot
731 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
732 				const struct qbman_fd *fd,
733 				const struct qbman_result *dq,
734 				struct dpaa2_queue *rxq,
735 				struct rte_event *ev)
736 {
737 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
738 		DPAA2_FD_PTA_SIZE + 16));
739 
740 	ev->flow_id = rxq->ev.flow_id;
741 	ev->sub_event_type = rxq->ev.sub_event_type;
742 	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
743 	ev->op = RTE_EVENT_OP_NEW;
744 	ev->sched_type = rxq->ev.sched_type;
745 	ev->queue_id = rxq->ev.queue_id;
746 	ev->priority = rxq->ev.priority;
747 
748 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
749 
750 	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
751 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
752 	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
753 
754 	qbman_swp_dqrr_consume(swp, dq);
755 }
756 
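/* Non-prefetch Rx: issue a volatile dequeue per iteration, wait for the
 * result storage to be updated, and convert each valid FD into an mbuf.
 * Repeats while the previous pull was full and more packets are requested.
 */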
757 uint16_t
758 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
759 {
760 	/* Function receives frames for a given device and VQ */
761 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
762 	struct qbman_result *dq_storage;
763 	uint32_t fqid = dpaa2_q->fqid;
764 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
765 	uint8_t pending, status;
766 	struct qbman_swp *swp;
767 	const struct qbman_fd *fd;
768 	struct qbman_pull_desc pulldesc;
769 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
770 
771 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
772 		ret = dpaa2_affine_qbman_swp();
773 		if (ret) {
774 			DPAA2_PMD_ERR(
775 				"Failed to allocate IO portal, tid: %d\n",
776 				rte_gettid());
777 			return 0;
778 		}
779 	}
780 	swp = DPAA2_PER_LCORE_PORTAL;
781 
782 	do {
783 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
784 		qbman_pull_desc_clear(&pulldesc);
785 		qbman_pull_desc_set_fq(&pulldesc, fqid);
786 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
787 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
788 
789 		if (next_pull > dpaa2_dqrr_size) {
790 			qbman_pull_desc_set_numframes(&pulldesc,
791 				dpaa2_dqrr_size);
792 			next_pull -= dpaa2_dqrr_size;
793 		} else {
794 			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
795 			next_pull = 0;
796 		}
797 
798 		while (1) {
799 			if (qbman_swp_pull(swp, &pulldesc)) {
800 				DPAA2_PMD_DP_DEBUG(
801 					"VDQ command is not issued. QBMAN is busy\n");
802 				/* Portal was busy, try again */
803 				continue;
804 			}
805 			break;
806 		}
807 
808 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
809 		/* Check if the previously issued command has completed. */
810 		while (!qbman_check_command_complete(dq_storage))
811 			;
812 
813 		num_pulled = 0;
814 		pending = 1;
815 		do {
816 			/* Loop until the dq_storage is updated with
817 			 * new token by QBMAN
818 			 */
819 			while (!qbman_check_new_result(dq_storage))
820 				;
821 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
822 			/* Check whether the last pull command has expired and
823 			 * set the condition for loop termination
824 			 */
825 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
826 				pending = 0;
827 				/* Check for valid frame. */
828 				status = qbman_result_DQ_flags(dq_storage);
829 				if (unlikely((status &
830 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
831 					continue;
832 			}
833 			fd = qbman_result_DQ_fd(dq_storage);
834 
835 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
836 			if (dpaa2_svr_family != SVR_LX2160A) {
837 				const struct qbman_fd *next_fd =
838 					qbman_result_DQ_fd(dq_storage + 1);
839 
840 				/* Prefetch Annotation address for the parse
841 				 * results.
842 				 */
843 				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
844 					DPAA2_GET_FD_ADDR(next_fd) +
845 					DPAA2_FD_PTA_SIZE + 16)));
846 			}
847 #endif
848 
849 			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
850 				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
851 							eth_data->port_id);
852 			else
853 				bufs[num_rx] = eth_fd_to_mbuf(fd,
854 							eth_data->port_id);
855 
856 			if (eth_data->dev_conf.rxmode.offloads &
857 					DEV_RX_OFFLOAD_VLAN_STRIP) {
858 				rte_vlan_strip(bufs[num_rx]);
859 			}
860 
861 			dq_storage++;
862 			num_rx++;
863 			num_pulled++;
864 		} while (pending);
865 	/* Last VDQ provided all packets and more packets are requested */
866 	} while (next_pull && num_pulled == dpaa2_dqrr_size);
867 
868 	dpaa2_q->rx_pkts += num_rx;
869 
870 	return num_rx;
871 }
872 
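/* Drain the Tx confirmation queue: pull confirmation FDs, release the
 * transmitted buffers back to their buffer pool through QBMAN and, when
 * IEEE1588 is enabled, record the Tx timestamp from the frame annotation.
 */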
873 uint16_t dpaa2_dev_tx_conf(void *queue)
874 {
875 	/* Function receives Tx confirmation frames for a given device and VQ */
876 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
877 	struct qbman_result *dq_storage;
878 	uint32_t fqid = dpaa2_q->fqid;
879 	int ret, num_tx_conf = 0, num_pulled;
880 	uint8_t pending, status;
881 	struct qbman_swp *swp;
882 	const struct qbman_fd *fd, *next_fd;
883 	struct qbman_pull_desc pulldesc;
884 	struct qbman_release_desc releasedesc;
885 	uint32_t bpid;
886 	uint64_t buf;
887 #if defined(RTE_LIBRTE_IEEE1588)
888 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
889 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
890 	struct dpaa2_annot_hdr *annotation;
891 #endif
892 
893 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
894 		ret = dpaa2_affine_qbman_swp();
895 		if (ret) {
896 			DPAA2_PMD_ERR(
897 				"Failed to allocate IO portal, tid: %d\n",
898 				rte_gettid());
899 			return 0;
900 		}
901 	}
902 	swp = DPAA2_PER_LCORE_PORTAL;
903 
904 	do {
905 		dq_storage = dpaa2_q->q_storage->dq_storage[0];
906 		qbman_pull_desc_clear(&pulldesc);
907 		qbman_pull_desc_set_fq(&pulldesc, fqid);
908 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
909 				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
910 
911 		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
912 
913 		while (1) {
914 			if (qbman_swp_pull(swp, &pulldesc)) {
915 				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
916 						   " QBMAN is busy\n");
917 				/* Portal was busy, try again */
918 				continue;
919 			}
920 			break;
921 		}
922 
923 		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
924 		/* Check if the previously issued command has completed. */
925 		while (!qbman_check_command_complete(dq_storage))
926 			;
927 
928 		num_pulled = 0;
929 		pending = 1;
930 		do {
931 			/* Loop until the dq_storage is updated with
932 			 * new token by QBMAN
933 			 */
934 			while (!qbman_check_new_result(dq_storage))
935 				;
936 			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
937 			/* Check whether the last pull command has expired and
938 			 * set the condition for loop termination
939 			 */
940 			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
941 				pending = 0;
942 				/* Check for valid frame. */
943 				status = qbman_result_DQ_flags(dq_storage);
944 				if (unlikely((status &
945 					QBMAN_DQ_STAT_VALIDFRAME) == 0))
946 					continue;
947 			}
948 			fd = qbman_result_DQ_fd(dq_storage);
949 
950 			next_fd = qbman_result_DQ_fd(dq_storage + 1);
951 			/* Prefetch Annotation address for the parse results */
952 			rte_prefetch0((void *)(size_t)
953 				(DPAA2_GET_FD_ADDR(next_fd) +
954 				 DPAA2_FD_PTA_SIZE + 16));
955 
956 			bpid = DPAA2_GET_FD_BPID(fd);
957 
958 			/* Create a release descriptor required for releasing
959 			 * buffers into QBMAN
960 			 */
961 			qbman_release_desc_clear(&releasedesc);
962 			qbman_release_desc_set_bpid(&releasedesc, bpid);
963 
964 			buf = DPAA2_GET_FD_ADDR(fd);
965 			/* feed them to bman */
966 			do {
967 				ret = qbman_swp_release(swp, &releasedesc,
968 							&buf, 1);
969 			} while (ret == -EBUSY);
970 
971 			dq_storage++;
972 			num_tx_conf++;
973 			num_pulled++;
974 #if defined(RTE_LIBRTE_IEEE1588)
975 			annotation = (struct dpaa2_annot_hdr *)((size_t)
976 				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
977 				DPAA2_FD_PTA_SIZE);
978 			priv->tx_timestamp = annotation->word2;
979 #endif
980 		} while (pending);
981 
982 	/* Last VDQ provided all packets and more packets are requested */
983 	} while (num_pulled == dpaa2_dqrr_size);
984 
985 	dpaa2_q->rx_pkts += num_tx_conf;
986 
987 	return num_tx_conf;
988 }
989 
990 /* Configure the egress frame annotation for timestamp update */
991 static void enable_tx_tstamp(struct qbman_fd *fd)
992 {
993 	struct dpaa2_faead *fd_faead;
994 
995 	/* Set frame annotation status field as valid */
996 	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;
997 
998 	/* Set frame annotation egress action descriptor as valid */
999 	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1000 
1001 	/* Set Annotation Length as 128B */
1002 	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1003 
1004 	/* enable update of confirmation frame annotation */
1005 	fd_faead = (struct dpaa2_faead *)((size_t)
1006 			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1007 			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1008 	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1009 				DPAA2_ANNOT_FAEAD_UPD;
1010 }
1011 
1012 /*
1013  * Callback to handle sending packets through WRIOP based interface
1014  */
1015 uint16_t
1016 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1017 {
1018 	/* Function to transmit the frames to the given device and VQ */
1019 	uint32_t loop, retry_count;
1020 	int32_t ret;
1021 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1022 	struct rte_mbuf *mi;
1023 	uint32_t frames_to_send;
1024 	struct rte_mempool *mp;
1025 	struct qbman_eq_desc eqdesc;
1026 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1027 	struct qbman_swp *swp;
1028 	uint16_t num_tx = 0;
1029 	uint16_t bpid;
1030 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1031 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1032 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1033 
1034 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1035 		ret = dpaa2_affine_qbman_swp();
1036 		if (ret) {
1037 			DPAA2_PMD_ERR(
1038 				"Failed to allocate IO portal, tid: %d\n",
1039 				rte_gettid());
1040 			return 0;
1041 		}
1042 	}
1043 	swp = DPAA2_PER_LCORE_PORTAL;
1044 
1045 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1046 			eth_data, dpaa2_q->fqid);
1047 
1048 #ifdef RTE_LIBRTE_IEEE1588
1049 	/* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1050 	 * corresponding to the last transmitted packet, for reading
1051 	 * the timestamp
1052 	 */
1053 	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1054 	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1055 #endif
1056 
1057 	/*Prepare enqueue descriptor*/
1058 	qbman_eq_desc_clear(&eqdesc);
1059 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1060 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1061 
1062 	/*Clear the unused FD fields before sending*/
1063 	while (nb_pkts) {
1064 		/*Check if the queue is congested*/
1065 		retry_count = 0;
1066 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1067 			retry_count++;
1068 			/* Retry for some time before giving up */
1069 			if (retry_count > CONG_RETRY_COUNT)
1070 				goto skip_tx;
1071 		}
1072 
1073 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1074 			dpaa2_eqcr_size : nb_pkts;
1075 
1076 		for (loop = 0; loop < frames_to_send; loop++) {
1077 			if (*dpaa2_seqn(*bufs)) {
1078 				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1079 
1080 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1081 						dqrr_index;
1082 				DPAA2_PER_LCORE_DQRR_SIZE--;
1083 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1084 				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1085 			}
1086 
1087 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1088 				mp = (*bufs)->pool;
1089 				/* Check the basic scenario and set
1090 				 * the FD appropriately here itself.
1091 				 */
1092 				if (likely(mp && mp->ops_index ==
1093 				    priv->bp_list->dpaa2_ops_index &&
1094 				    (*bufs)->nb_segs == 1 &&
1095 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1096 					if (unlikely(((*bufs)->ol_flags
1097 						& PKT_TX_VLAN_PKT) ||
1098 						(eth_data->dev_conf.txmode.offloads
1099 						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
1100 						ret = rte_vlan_insert(bufs);
1101 						if (ret)
1102 							goto send_n_return;
1103 					}
1104 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1105 					&fd_arr[loop], mempool_to_bpid(mp));
1106 					bufs++;
1107 #ifdef RTE_LIBRTE_IEEE1588
1108 					enable_tx_tstamp(&fd_arr[loop]);
1109 #endif
1110 					continue;
1111 				}
1112 			} else {
1113 				mi = rte_mbuf_from_indirect(*bufs);
1114 				mp = mi->pool;
1115 			}
1116 			/* Not a hw_pkt pool allocated frame */
1117 			if (unlikely(!mp || !priv->bp_list)) {
1118 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1119 				goto send_n_return;
1120 			}
1121 
1122 			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
1123 				(eth_data->dev_conf.txmode.offloads
1124 				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
1125 				int ret = rte_vlan_insert(bufs);
1126 				if (ret)
1127 					goto send_n_return;
1128 			}
1129 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1130 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1131 				/* alloc should be from the default buffer pool
1132 				 * attached to this interface
1133 				 */
1134 				bpid = priv->bp_list->buf_pool.bpid;
1135 
1136 				if (unlikely((*bufs)->nb_segs > 1)) {
1137 					DPAA2_PMD_ERR("S/G support not added"
1138 						" for non hw offload buffer");
1139 					goto send_n_return;
1140 				}
1141 				if (eth_copy_mbuf_to_fd(*bufs,
1142 							&fd_arr[loop], bpid)) {
1143 					goto send_n_return;
1144 				}
1145 				/* free the original packet */
1146 				rte_pktmbuf_free(*bufs);
1147 			} else {
1148 				bpid = mempool_to_bpid(mp);
1149 				if (unlikely((*bufs)->nb_segs > 1)) {
1150 					if (eth_mbuf_to_sg_fd(*bufs,
1151 							&fd_arr[loop], bpid))
1152 						goto send_n_return;
1153 				} else {
1154 					eth_mbuf_to_fd(*bufs,
1155 						       &fd_arr[loop], bpid);
1156 				}
1157 			}
1158 #ifdef RTE_LIBRTE_IEEE1588
1159 			enable_tx_tstamp(&fd_arr[loop]);
1160 #endif
1161 			bufs++;
1162 		}
1163 
1164 		loop = 0;
1165 		retry_count = 0;
1166 		while (loop < frames_to_send) {
1167 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1168 					&fd_arr[loop], &flags[loop],
1169 					frames_to_send - loop);
1170 			if (unlikely(ret < 0)) {
1171 				retry_count++;
1172 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1173 					num_tx += loop;
1174 					nb_pkts -= loop;
1175 					goto send_n_return;
1176 				}
1177 			} else {
1178 				loop += ret;
1179 				retry_count = 0;
1180 			}
1181 		}
1182 
1183 		num_tx += loop;
1184 		nb_pkts -= loop;
1185 	}
1186 	dpaa2_q->tx_pkts += num_tx;
1187 	return num_tx;
1188 
1189 send_n_return:
1190 	/* send any already prepared fd */
1191 	if (loop) {
1192 		unsigned int i = 0;
1193 
1194 		retry_count = 0;
1195 		while (i < loop) {
1196 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1197 							 &fd_arr[i],
1198 							 &flags[i],
1199 							 loop - i);
1200 			if (unlikely(ret < 0)) {
1201 				retry_count++;
1202 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1203 					break;
1204 			} else {
1205 				i += ret;
1206 				retry_count = 0;
1207 			}
1208 		}
1209 		num_tx += i;
1210 	}
1211 skip_tx:
1212 	dpaa2_q->tx_pkts += num_tx;
1213 	return num_tx;
1214 }
1215 
1216 void
1217 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1218 {
1219 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1220 	struct qbman_fd *fd;
1221 	struct rte_mbuf *m;
1222 
1223 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1224 
1225 	/* Setting the port id does not matter as we are going to free the mbuf */
1226 	m = eth_fd_to_mbuf(fd, 0);
1227 	rte_pktmbuf_free(m);
1228 }
1229 
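/* Prepare a per-frame enqueue descriptor from the mbuf sequence number:
 * either program order restoration (ORP id and sequence number, with an
 * enqueue response when loose ordering is disabled) or request a DQRR
 * consume (DCA) for frames received on an atomic queue.
 */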
1230 static void
1231 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1232 			     struct rte_mbuf *m,
1233 			     struct qbman_eq_desc *eqdesc)
1234 {
1235 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1236 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1237 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1238 	struct eqresp_metadata *eqresp_meta;
1239 	uint16_t orpid, seqnum;
1240 	uint8_t dq_idx;
1241 
1242 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1243 
1244 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1245 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1246 			DPAA2_EQCR_OPRID_SHIFT;
1247 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1248 			DPAA2_EQCR_SEQNUM_SHIFT;
1249 
1250 		if (!priv->en_loose_ordered) {
1251 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1252 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1253 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1254 				dpio_dev->eqresp_pi]), 1);
1255 			qbman_eq_desc_set_token(eqdesc, 1);
1256 
1257 			eqresp_meta = &dpio_dev->eqresp_meta[
1258 				dpio_dev->eqresp_pi];
1259 			eqresp_meta->dpaa2_q = dpaa2_q;
1260 			eqresp_meta->mp = m->pool;
1261 
1262 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1263 				dpio_dev->eqresp_pi++ :
1264 				(dpio_dev->eqresp_pi = 0);
1265 		} else {
1266 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1267 		}
1268 	} else {
1269 		dq_idx = *dpaa2_seqn(m) - 1;
1270 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1271 		DPAA2_PER_LCORE_DQRR_SIZE--;
1272 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1273 	}
1274 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1275 }
1276 
1277 /* Callback to handle sending ordered packets through WRIOP based interface */
1278 uint16_t
1279 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1280 {
1281 	/* Function to transmit the frames to the given device and VQ */
1282 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1283 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1284 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1285 	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1286 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1287 	struct rte_mbuf *mi;
1288 	struct rte_mempool *mp;
1289 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1290 	struct qbman_swp *swp;
1291 	uint32_t frames_to_send, num_free_eq_desc;
1292 	uint32_t loop, retry_count;
1293 	int32_t ret;
1294 	uint16_t num_tx = 0;
1295 	uint16_t bpid;
1296 
1297 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1298 		ret = dpaa2_affine_qbman_swp();
1299 		if (ret) {
1300 			DPAA2_PMD_ERR(
1301 				"Failed to allocate IO portal, tid: %d\n",
1302 				rte_gettid());
1303 			return 0;
1304 		}
1305 	}
1306 	swp = DPAA2_PER_LCORE_PORTAL;
1307 
1308 	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1309 			   eth_data, dpaa2_q->fqid);
1310 
1311 	/* This would also handle normal and atomic queues as any type
1312 	 * of packet can be enqueued when ordered queues are being used.
1313 	 */
1314 	while (nb_pkts) {
1315 		/*Check if the queue is congested*/
1316 		retry_count = 0;
1317 		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1318 			retry_count++;
1319 			/* Retry for some time before giving up */
1320 			if (retry_count > CONG_RETRY_COUNT)
1321 				goto skip_tx;
1322 		}
1323 
1324 		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1325 			dpaa2_eqcr_size : nb_pkts;
1326 
1327 		if (!priv->en_loose_ordered) {
1328 			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1329 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1330 				if (num_free_eq_desc < frames_to_send)
1331 					frames_to_send = num_free_eq_desc;
1332 			}
1333 		}
1334 
1335 		for (loop = 0; loop < frames_to_send; loop++) {
1336 			/*Prepare enqueue descriptor*/
1337 			qbman_eq_desc_clear(&eqdesc[loop]);
1338 
1339 			if (*dpaa2_seqn(*bufs)) {
1340 				/* Use only queue 0 for Tx in case of atomic/
1341 				 * ordered packets as packets can get unordered
1342 				 * when being transmitted out from the interface
1343 				 */
1344 				dpaa2_set_enqueue_descriptor(order_sendq,
1345 							     (*bufs),
1346 							     &eqdesc[loop]);
1347 			} else {
1348 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1349 							 DPAA2_EQ_RESP_ERR_FQ);
1350 				qbman_eq_desc_set_fq(&eqdesc[loop],
1351 						     dpaa2_q->fqid);
1352 			}
1353 
1354 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
1355 				mp = (*bufs)->pool;
1356 				/* Check the basic scenario and set
1357 				 * the FD appropriately here itself.
1358 				 */
1359 				if (likely(mp && mp->ops_index ==
1360 				    priv->bp_list->dpaa2_ops_index &&
1361 				    (*bufs)->nb_segs == 1 &&
1362 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
1363 					if (unlikely((*bufs)->ol_flags
1364 						& PKT_TX_VLAN_PKT)) {
1365 					  ret = rte_vlan_insert(bufs);
1366 					  if (ret)
1367 						goto send_n_return;
1368 					}
1369 					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1370 						&fd_arr[loop],
1371 						mempool_to_bpid(mp));
1372 					bufs++;
1373 					continue;
1374 				}
1375 			} else {
1376 				mi = rte_mbuf_from_indirect(*bufs);
1377 				mp = mi->pool;
1378 			}
1379 			/* Not a hw_pkt pool allocated frame */
1380 			if (unlikely(!mp || !priv->bp_list)) {
1381 				DPAA2_PMD_ERR("Err: No buffer pool attached");
1382 				goto send_n_return;
1383 			}
1384 
1385 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1386 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1387 				/* alloc should be from the default buffer pool
1388 				 * attached to this interface
1389 				 */
1390 				bpid = priv->bp_list->buf_pool.bpid;
1391 
1392 				if (unlikely((*bufs)->nb_segs > 1)) {
1393 					DPAA2_PMD_ERR(
1394 						"S/G not supp for non hw offload buffer");
1395 					goto send_n_return;
1396 				}
1397 				if (eth_copy_mbuf_to_fd(*bufs,
1398 							&fd_arr[loop], bpid)) {
1399 					goto send_n_return;
1400 				}
1401 				/* free the original packet */
1402 				rte_pktmbuf_free(*bufs);
1403 			} else {
1404 				bpid = mempool_to_bpid(mp);
1405 				if (unlikely((*bufs)->nb_segs > 1)) {
1406 					if (eth_mbuf_to_sg_fd(*bufs,
1407 							      &fd_arr[loop],
1408 							      bpid))
1409 						goto send_n_return;
1410 				} else {
1411 					eth_mbuf_to_fd(*bufs,
1412 						       &fd_arr[loop], bpid);
1413 				}
1414 			}
1415 			bufs++;
1416 		}
1417 
1418 		loop = 0;
1419 		retry_count = 0;
1420 		while (loop < frames_to_send) {
1421 			ret = qbman_swp_enqueue_multiple_desc(swp,
1422 					&eqdesc[loop], &fd_arr[loop],
1423 					frames_to_send - loop);
1424 			if (unlikely(ret < 0)) {
1425 				retry_count++;
1426 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1427 					num_tx += loop;
1428 					nb_pkts -= loop;
1429 					goto send_n_return;
1430 				}
1431 			} else {
1432 				loop += ret;
1433 				retry_count = 0;
1434 			}
1435 		}
1436 
1437 		num_tx += loop;
1438 		nb_pkts -= loop;
1439 	}
1440 	dpaa2_q->tx_pkts += num_tx;
1441 	return num_tx;
1442 
1443 send_n_return:
1444 	/* send any already prepared fd */
1445 	if (loop) {
1446 		unsigned int i = 0;
1447 
1448 		retry_count = 0;
1449 		while (i < loop) {
1450 			ret = qbman_swp_enqueue_multiple_desc(swp,
1451 				       &eqdesc[i], &fd_arr[i], loop - i);
1452 			if (unlikely(ret < 0)) {
1453 				retry_count++;
1454 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1455 					break;
1456 			} else {
1457 				i += ret;
1458 				retry_count = 0;
1459 			}
1460 		}
1461 		num_tx += i;
1462 	}
1463 skip_tx:
1464 	dpaa2_q->tx_pkts += num_tx;
1465 	return num_tx;
1466 }
1467 
1468 /**
1469  * Dummy DPDK callback for TX.
1470  *
1471  * This function is used to temporarily replace the real callback during
1472  * unsafe control operations on the queue, or in case of error.
1473  *
1474  * @param queue
1475  *   Generic pointer to TX queue structure.
1476  * @param[in] bufs
1477  *   Packets to transmit.
1478  * @param nb_pkts
1479  *   Number of packets in array.
1480  *
1481  * @return
1482  *   Number of packets successfully transmitted (always 0 for this dummy).
1483  */
1484 uint16_t
1485 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1486 {
1487 	(void)queue;
1488 	(void)bufs;
1489 	(void)nb_pkts;
1490 	return 0;
1491 }
1492 
1493 #if defined(RTE_TOOLCHAIN_GCC)
1494 #pragma GCC diagnostic push
1495 #pragma GCC diagnostic ignored "-Wcast-qual"
1496 #elif defined(RTE_TOOLCHAIN_CLANG)
1497 #pragma clang diagnostic push
1498 #pragma clang diagnostic ignored "-Wcast-qual"
1499 #endif
1500 
1501 /* This function loops back all the received packets. */
1502 uint16_t
1503 dpaa2_dev_loopback_rx(void *queue,
1504 		      struct rte_mbuf **bufs __rte_unused,
1505 		      uint16_t nb_pkts)
1506 {
1507 	/* Function receives frames for a given device and VQ */
1508 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1509 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
1510 	uint32_t fqid = dpaa2_q->fqid;
1511 	int ret, num_rx = 0, num_tx = 0, pull_size;
1512 	uint8_t pending, status;
1513 	struct qbman_swp *swp;
1514 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1515 	struct qbman_pull_desc pulldesc;
1516 	struct qbman_eq_desc eqdesc;
1517 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1518 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1519 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
1520 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
1521 	/* TODO: currently we are using only the 1st TX queue for loopback */
1522 
1523 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1524 		ret = dpaa2_affine_qbman_ethrx_swp();
1525 		if (ret) {
1526 			DPAA2_PMD_ERR("Failure in affining portal");
1527 			return 0;
1528 		}
1529 	}
1530 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1531 	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1532 	if (unlikely(!q_storage->active_dqs)) {
1533 		q_storage->toggle = 0;
1534 		dq_storage = q_storage->dq_storage[q_storage->toggle];
1535 		q_storage->last_num_pkts = pull_size;
1536 		qbman_pull_desc_clear(&pulldesc);
1537 		qbman_pull_desc_set_numframes(&pulldesc,
1538 					      q_storage->last_num_pkts);
1539 		qbman_pull_desc_set_fq(&pulldesc, fqid);
1540 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1541 			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1542 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1543 			while (!qbman_check_command_complete(
1544 			       get_swp_active_dqs(
1545 			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1546 				;
1547 			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1548 		}
1549 		while (1) {
1550 			if (qbman_swp_pull(swp, &pulldesc)) {
1551 				DPAA2_PMD_DP_DEBUG(
1552 					"VDQ command not issued. QBMAN busy\n");
1553 				/* Portal was busy, try again */
1554 				continue;
1555 			}
1556 			break;
1557 		}
1558 		q_storage->active_dqs = dq_storage;
1559 		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1560 		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1561 				   dq_storage);
1562 	}
1563 
1564 	dq_storage = q_storage->active_dqs;
1565 	rte_prefetch0((void *)(size_t)(dq_storage));
1566 	rte_prefetch0((void *)(size_t)(dq_storage + 1));
1567 
1568 	/* Prepare next pull descriptor. This will give space for the
1569 	 * prefetching done on DQRR entries
1570 	 */
1571 	q_storage->toggle ^= 1;
1572 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1573 	qbman_pull_desc_clear(&pulldesc);
1574 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1575 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1576 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1577 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1578 
1579 	/*Prepare enqueue descriptor*/
1580 	qbman_eq_desc_clear(&eqdesc);
1581 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1582 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1583 	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1584 
1585 	/* Check if the previously issued command has completed.
1586 	 * The SWP also seems to be shared between the Ethernet driver
1587 	 * and the SEC driver.
1588 	 */
1589 	while (!qbman_check_command_complete(dq_storage))
1590 		;
1591 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1592 		clear_swp_active_dqs(q_storage->active_dpio_id);
1593 
1594 	pending = 1;
1595 
1596 	do {
1597 		/* Loop until the dq_storage is updated with
1598 		 * new token by QBMAN
1599 		 */
1600 		while (!qbman_check_new_result(dq_storage))
1601 			;
1602 		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1603 		/* Check whether the last pull command has expired and
1604 		 * set the condition for loop termination
1605 		 */
1606 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1607 			pending = 0;
1608 			/* Check for valid frame. */
1609 			status = qbman_result_DQ_flags(dq_storage);
1610 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1611 				continue;
1612 		}
1613 		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1614 
1615 		dq_storage++;
1616 		num_rx++;
1617 	} while (pending);
1618 
1619 	while (num_tx < num_rx) {
1620 		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1621 				&fd[num_tx], 0, num_rx - num_tx);
1622 	}
1623 
1624 	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1625 		while (!qbman_check_command_complete(
1626 		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1627 			;
1628 		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1629 	}
1630 	/* issue a volatile dequeue command for next pull */
1631 	while (1) {
1632 		if (qbman_swp_pull(swp, &pulldesc)) {
1633 			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1634 				  " QBMAN is busy (2)\n");
1635 			continue;
1636 		}
1637 		break;
1638 	}
1639 	q_storage->active_dqs = dq_storage1;
1640 	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1641 	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1642 
1643 	dpaa2_q->rx_pkts += num_rx;
1644 	dpaa2_q->tx_pkts += num_tx;
1645 
1646 	return 0;
1647 }
1648 #if defined(RTE_TOOLCHAIN_GCC)
1649 #pragma GCC diagnostic pop
1650 #elif defined(RTE_TOOLCHAIN_CLANG)
1651 #pragma clang diagnostic pop
1652 #endif
1653