/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

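/* Populate a contiguous (single buffer) frame descriptor from an mbuf:
 * the buffer address, data length, buffer pool id and data offset come
 * from the mbuf and the supplied bpid, and the ASAL field is set to the
 * fixed DPAA2_ASAL_VAL.
 */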
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
} while (0)

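/* On LX2160A the hardware parser summarises the packet type in the FRC
 * field of the FD; translate that summary into RTE_PTYPE_* flags and the
 * VLAN offload flag.
 */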
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
	DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_VLAN_1:
	case DPAA2_PKT_TYPE_VLAN_2:
		m->ol_flags |= PKT_RX_VLAN;
		break;
	/* More switch cases can be added */
	/* TODO: Add handling for checksum error check from FRC */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
	}
}

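/* Slow parse path: walk the individual presence bits in annotation words 3
 * and 4 (L2/ARP, IPv4/IPv6 with options, fragmentation, L4 protocol) when
 * the fast path below does not recognise the combination.
 */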
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;

	DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);
	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		  L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

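/* Fast parse path: set the VLAN and L3/L4 checksum-error offload flags from
 * the annotation, then match annotation word4 against a handful of common
 * IPv4/IPv6 TCP/UDP combinations; anything else falls back to the slow
 * parser above.
 */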
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	/* Check offloads first */
	if (BIT_ISSET_AT_POS(annotation->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(annotation);
}

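/* Convert a scatter/gather FD back into an mbuf chain. The FD points at a
 * scatter gather table (SGT); every SG entry becomes one segment whose mbuf
 * header lives inline in the buffer (DPAA2_INLINE_MBUF_FROM_BUF). The buffer
 * that carried the SGT itself is released once the chain is built.
 */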
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(first_seg,
				DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else
		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr  = (uint8_t *)sg_addr;
		next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len  = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}

static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Need to repopulate some of the fields,
	 * as they may have changed during the last transmission.
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet.
	 * On LX2 the parse results are in the FRC field of the FD.
	 * On other DPAA2 platforms the parse results follow the
	 * private SW annotation area.
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}

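/* Build a scatter/gather FD from a multi-segment mbuf. A buffer is taken
 * from the mbuf's pool to hold the SG table, with one SG entry per segment.
 * Buffers that are still referenced elsewhere (refcnt > 1, or indirect
 * mbufs whose owner is shared) get an invalid buffer pool id so that the
 * hardware does not free them after transmission.
 */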
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set the scatter gather table and scatter gather entries */
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Reset the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

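/* Convert a single-segment mbuf into a contiguous FD. If the direct mbuf is
 * still referenced elsewhere (refcnt > 1), the FD gets an invalid buffer
 * pool id so hardware does not free the buffer, and the refcount is dropped
 * by one instead. For indirect mbufs the owner's refcount decides whether
 * the bpid is invalidated, and the indirect mbuf itself is freed.
 */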
static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}

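/* Fallback for mbufs that do not come from a DPAA2-backed mempool: copy the
 * payload into a buffer allocated from the interface's default DPAA2 buffer
 * pool so that hardware can own (and later release) the buffer. The caller
 * frees the original mbuf afterwards.
 */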
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls for a given queue. If that is not the case, use the
 * non-prefetch version of the Rx call instead: this one returns as many
 * packets as were requested in the previous call, without honoring the
 * current nb_pkts or the space available in bufs.
 */
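/* Rx with DQRR prefetch: the queue keeps two result storages and toggles
 * between them, so while frames from the previously issued volatile dequeue
 * are being processed, the pull command for the next batch is already
 * prepared and issued on the other storage.
 */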
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
					       DPAA2_DQRR_RING_SIZE : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						  " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare the next pull descriptor. This gives room for the
	 * prefetching done on the DQRR entries.
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previously issued command has completed.
	 * Also, the SWP seems to be shared between the Ethernet driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired,
		 * setting the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		next_fd = qbman_result_DQ_fd(dq_storage + 1);
		/* Prefetch the annotation address for the parse results */
		rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
				+ DPAA2_FD_PTA_SIZE + 16));

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* Issue a volatile dequeue command for the next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					  "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

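/* Event dequeue callbacks: convert an FD delivered through the eventdev
 * path into an rte_event carrying the mbuf. For parallel queues the DQRR
 * entry is consumed immediately; for atomic queues the DQRR index is stored
 * in mbuf->seqn and the entry is held until it is released, e.g. by an
 * enqueue with the DCA flag in the Tx path below.
 */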
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}

void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	dqrr_index = qbman_get_dqrr_idx(dq);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

/*
 * Callback to handle sending packets through a WRIOP based interface
 */
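/* The Tx path backs off while the congestion state change notification
 * (CSCN) for the queue reports congestion, and frames whose mbuf->seqn holds
 * a DQRR index (atomic eventdev flow) are enqueued with the DCA flag so the
 * held DQRR entry is consumed by the enqueue.
 */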
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to the given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			fd_arr[loop].simple.frc = 0;
			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
			DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(dev->data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
					&fd_arr[loop], mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(dev->data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* Send any FDs that were already prepared */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			/* Keep the flags array aligned with fd_arr:
			 * both must be indexed with i here.
			 */
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i],
							&flags[i],
							loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to the TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in the array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;
	return 0;
}