/* xref: /dpdk/drivers/net/dpaa/dpaa_rxtx.c (revision e7524271c3984ae3a77d42c7ea1df78ddcc89a5f) */
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017,2019-2021 NXP
 *
 */

/* System headers */
#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
#include <limits.h>
#include <sched.h>
#include <pthread.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_net.h>
#include <rte_eventdev.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <bus_dpaa_driver.h>
#include <dpaa_mempool.h>

#include <qman.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <dpaa_of.h>
#include <netcfg.h>

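/*
 * DPAA_MBUF_TO_CONTIG_FD packs a single-segment mbuf into a frame
 * descriptor. The 32-bit 'opaque' word carries the frame format, data
 * offset and length, as encoded by the DPAA_FD_FORMAT/OFFSET shift and
 * LENGTH mask macros (the length matches the hardware 20-bit 'length20'
 * field). A sketch of typical use on the Tx path, where bpid 0xff tells
 * the hardware not to release the buffer back to BMAN:
 *
 *	struct qm_fd fd;
 *	DPAA_MBUF_TO_CONTIG_FD(mbuf, &fd, 0xff);
 */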
#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
	do { \
		(_fd)->cmd = 0; \
		(_fd)->opaque_addr = 0; \
		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
		(_fd)->opaque |= (_mbuf)->pkt_len; \
		(_fd)->addr = (_mbuf)->buf_iova; \
		(_fd)->bpid = _bpid; \
	} while (0)

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
#define DISPLAY_PRINT printf
static void dpaa_display_frame_info(const struct qm_fd *fd,
			uint32_t fqid, bool rx)
{
	int ii;
	uint8_t *ptr;
	struct annotations_t *annot = rte_dpaa_mem_ptov(fd->addr);
	uint8_t format;

	if (!fd->status) {
		/* Only display frames with a non-zero (error) status. */
		return;
	}

	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;

	DISPLAY_PRINT("fqid %d bpid %d addr 0x%lx, format %d\r\n",
		      fqid, fd->bpid, (unsigned long)fd->addr, fd->format);
	DISPLAY_PRINT("off %d, len %d stat 0x%x\r\n",
		      fd->offset, fd->length20, fd->status);
	if (rx) {
		ptr = (uint8_t *)&annot->parse;
		DISPLAY_PRINT("RX parser result:\r\n");
		for (ii = 0; ii < (int)sizeof(struct dpaa_eth_parse_results_t);
			ii++) {
			DISPLAY_PRINT("%02x ", ptr[ii]);
			if (((ii + 1) % 16) == 0)
				DISPLAY_PRINT("\n");
		}
		DISPLAY_PRINT("\n");
	}

	if (unlikely(format == qm_fd_sg)) {
		/*TBD:S/G display: to be implemented*/
		return;
	}

	DISPLAY_PRINT("Frame payload:\r\n");
	ptr = (uint8_t *)annot;
	ptr += fd->offset;
	for (ii = 0; ii < fd->length20; ii++) {
		DISPLAY_PRINT("%02x ", ptr[ii]);
		if (((ii + 1) % 16) == 0)
			DISPLAY_PRINT("\n");
	}
	DISPLAY_PRINT("\n");
}
#else
#define dpaa_display_frame_info(a, b, c)
#endif

static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
				     uint64_t prs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Slow parsing");
	/*TBD:XXX: to be implemented*/
}

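/*
 * Translate the FMan parse results (stored in the annotation area at the
 * start of the buffer) into mbuf fields: packet_type from the parse word,
 * l2_len/l3_len via tx_offload, the RSS hash from the hash result, and
 * VLAN/checksum offload flags. Checksums are assumed good unless a
 * dedicated checksum-error parse code says otherwise.
 */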
static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
	uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;

	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

	m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD |
		RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	switch (prs) {
	case DPAA_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA_PKT_TYPE_IPV4_FRAG:
	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV6_FRAG:
	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPSEC_IPV4:
		if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK)
			m->packet_type = RTE_PTYPE_L2_ETHER |
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_ESP;
		break;
	case DPAA_PKT_TYPE_IPSEC_IPV6:
		if (*((uintptr_t *)&annot->parse) & DPAA_PARSE_ESP_MASK)
			m->packet_type = RTE_PTYPE_L2_ETHER |
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_ESP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV4_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_CSUM_ERR:
		m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_BAD;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR:
	case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR:
		m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_L4_CKSUM_BAD;
		break;
	case DPAA_PKT_TYPE_NONE:
		m->packet_type = 0;
		break;
	/* More switch cases can be added */
	default:
		dpaa_slow_parsing(m, prs);
	}

	m->tx_offload = annot->parse.ip_off[0];
	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
					<< DPAA_PKT_L3_LEN_SHIFT;

	/* Set the hash values */
	m->hash.rss = (uint32_t)(annot->hash);

	/* Check if Vlan is present */
	if (prs & DPAA_PARSE_VLAN_MASK)
		m->ol_flags |= RTE_MBUF_F_RX_VLAN;
	/* Packet received without stripping the vlan */
}

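/*
 * Software checksum fallback: compute the IPv4 header checksum and the
 * TCP/UDP checksum directly in the packet. Used when there is not enough
 * headroom for the hardware offload path (see dpaa_unsegmented_checksum).
 */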
static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
	struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
	struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

	DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT)) {
		ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6_EXT))
		ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr +
					  mbuf->l3_len);
		tcp_hdr->cksum = 0;
		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
							       tcp_hdr);
		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
							       tcp_hdr);
	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
		   RTE_PTYPE_L4_UDP) {
		struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
							     mbuf->l3_len);
		udp_hdr->dgram_cksum = 0;
		if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
								     udp_hdr);
		else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
								     udp_hdr);
	}
}

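/*
 * Hardware checksum offload: instead of computing checksums, fill in a
 * minimal set of parse results (L3/L4 type and offsets) in the buffer
 * headroom and set the FD command bits so that FMan computes the L3 and
 * L4 checksums while transmitting the frame.
 */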
static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
					 struct qm_fd *fd, char *prs_buf)
{
	struct dpaa_eth_parse_results_t *prs;

	DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);

	prs = GET_TX_PRS(prs_buf);
	prs->l3r = 0;
	prs->l4r = 0;
	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	   RTE_PTYPE_L3_IPV4_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		RTE_PTYPE_L3_IPV6_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;

	prs->ip_off[0] = mbuf->l2_len;
	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
	/* Enable L3 (and L4, if TCP or UDP) HW checksum */
	fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}

static inline void
dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
{
	if (!mbuf->packet_type) {
		struct rte_net_hdr_lens hdr_lens;

		mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
				RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
				| RTE_PTYPE_L4_MASK);
		mbuf->l2_len = hdr_lens.l2_len;
		mbuf->l3_len = hdr_lens.l3_len;
	}
	if (mbuf->data_off < (DEFAULT_TX_ICEOF +
	    sizeof(struct dpaa_eth_parse_results_t))) {
		DPAA_DP_LOG(DEBUG, "Checksum offload error: not enough "
			"headroom for checksum offload; "
			"calculating checksum in software.");
		dpaa_checksum(mbuf);
	} else {
		dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
	}
}

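/*
 * Convert a scatter/gather FD into an mbuf chain without copying: each SG
 * entry points into a buffer whose metadata area holds the corresponding
 * mbuf, so the segments are recovered by walking the SG table. The buffer
 * that carried the SG table itself ('temp') is freed once the chain is
 * built.
 */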
static struct rte_mbuf *
dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
	struct qm_sg_entry *sgt, *sg_temp;
	void *vaddr, *sg_vaddr;
	int i = 0;
	uint16_t fd_offset = fd->offset;

	vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
	if (!vaddr) {
		DPAA_PMD_ERR("unable to convert physical address");
		return NULL;
	}
	sgt = vaddr + fd_offset;
	sg_temp = &sgt[i++];
	hw_sg_to_cpu(sg_temp);
	temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
	sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));

	first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
	first_seg->data_off = sg_temp->offset;
	first_seg->data_len = sg_temp->length;
	first_seg->pkt_len = sg_temp->length;
	rte_mbuf_refcnt_set(first_seg, 1);

	first_seg->port = ifid;
	first_seg->nb_segs = 1;
	first_seg->ol_flags = 0;
	prev_seg = first_seg;
	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
					     qm_sg_entry_get64(sg_temp));
		cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
		cur_seg->data_off = sg_temp->offset;
		cur_seg->data_len = sg_temp->length;
		first_seg->pkt_len += sg_temp->length;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(cur_seg, 1);
		prev_seg->next = cur_seg;
		if (sg_temp->final) {
			cur_seg->next = NULL;
			break;
		}
		prev_seg = cur_seg;
	}
	DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
			first_seg->pkt_len, first_seg->nb_segs);

	dpaa_eth_packet_info(first_seg, vaddr);
	rte_pktmbuf_free_seg(temp);

	return first_seg;
}

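/*
 * Convert a received FD into an mbuf. For contiguous frames the mbuf
 * header lives just before the data buffer (meta_data_size bytes), so
 * the conversion is a pointer adjustment plus field initialization; S/G
 * frames are handed off to dpaa_eth_sg_to_mbuf().
 */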
static inline struct rte_mbuf *
dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	void *ptr;
	uint8_t format =
		(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	uint16_t offset;
	uint32_t length;

	if (unlikely(format == qm_fd_sg))
		return dpaa_eth_sg_to_mbuf(fd, ifid);

	offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
	length = fd->opaque & DPAA_FD_LENGTH_MASK;

	DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);

	/* Ignoring case when format != qm_fd_contig */
	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));

	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
	/* Prefetch the Parse results and packet data to L1 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

	mbuf->data_off = offset;
	mbuf->data_len = length;
	mbuf->pkt_len = length;

	mbuf->port = ifid;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
	dpaa_eth_packet_info(mbuf, mbuf->buf_addr);

	return mbuf;
}

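/*
 * Release the buffer(s) referenced by an FD back to their mempool,
 * handling both contiguous and S/G formats. Used on Tx paths where the
 * frame is not freed by hardware, e.g. when processing enqueue
 * rejections.
 */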
uint16_t
dpaa_free_mbuf(const struct qm_fd *fd)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info;
	uint8_t format;
	void *ptr;

	bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	if (unlikely(format == qm_fd_sg)) {
		struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
		struct qm_sg_entry *sgt, *sg_temp;
		void *vaddr, *sg_vaddr;
		int i = 0;
		uint16_t fd_offset = fd->offset;

		vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
		if (!vaddr) {
			DPAA_PMD_ERR("unable to convert physical address");
			return -1;
		}
		sgt = vaddr + fd_offset;
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		temp = (struct rte_mbuf *)
			((char *)vaddr - bp_info->meta_data_size);
		sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
						qm_sg_entry_get64(sg_temp));

		first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
		first_seg->nb_segs = 1;
		prev_seg = first_seg;
		while (i < DPAA_SGT_MAX_ENTRIES) {
			sg_temp = &sgt[i++];
			hw_sg_to_cpu(sg_temp);
			sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
						qm_sg_entry_get64(sg_temp));
			cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
			first_seg->nb_segs += 1;
			prev_seg->next = cur_seg;
			if (sg_temp->final) {
				cur_seg->next = NULL;
				break;
			}
			prev_seg = cur_seg;
		}

		rte_pktmbuf_free_seg(temp);
		rte_pktmbuf_free_seg(first_seg);
		return 0;
	}

	ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

	rte_pktmbuf_free(mbuf);

	return 0;
}

/* Specific to LS1043: the next frame's annotation is prefetched in
 * software while the current frame is being processed.
 */
void
dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
	   void **bufs, int num_bufs)
{
	struct rte_mbuf *mbuf;
	struct dpaa_bp_info *bp_info;
	const struct qm_fd *fd;
	void *ptr;
	struct dpaa_if *dpaa_intf;
	uint16_t offset, i;
	uint32_t length;
	uint8_t format;

	bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
	ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
	bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

	for (i = 0; i < num_bufs; i++) {
		if (i < num_bufs - 1) {
			bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
			ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
			rte_prefetch0((void *)((uint8_t *)ptr +
					DEFAULT_RX_ICEOF));
			bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
					bp_info->meta_data_size);
		}

		fd = &dqrr[i]->fd;
		dpaa_intf = fq[0]->dpaa_intf;
		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;
		if (unlikely(format == qm_fd_sg)) {
			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
			continue;
		}

		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
				DPAA_FD_OFFSET_SHIFT;
		length = fd->opaque & DPAA_FD_LENGTH_MASK;

		mbuf = bufs[i];
		mbuf->data_off = offset;
		mbuf->data_len = length;
		mbuf->pkt_len = length;
		mbuf->port = dpaa_intf->ifid;

		mbuf->nb_segs = 1;
		mbuf->ol_flags = 0;
		mbuf->next = NULL;
		rte_mbuf_refcnt_set(mbuf, 1);
		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
		dpaa_display_frame_info(fd, fq[0]->fqid, true);
	}
}

void
dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
	   void **bufs, int num_bufs)
{
	struct rte_mbuf *mbuf;
	const struct qm_fd *fd;
	struct dpaa_if *dpaa_intf;
	uint16_t offset, i;
	uint32_t length;
	uint8_t format;

	for (i = 0; i < num_bufs; i++) {
		fd = &dqrr[i]->fd;
		dpaa_intf = fq[0]->dpaa_intf;
		format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
				DPAA_FD_FORMAT_SHIFT;
		if (unlikely(format == qm_fd_sg)) {
			bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
			continue;
		}

		offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
				DPAA_FD_OFFSET_SHIFT;
		length = fd->opaque & DPAA_FD_LENGTH_MASK;

		mbuf = bufs[i];
		mbuf->data_off = offset;
		mbuf->data_len = length;
		mbuf->pkt_len = length;
		mbuf->port = dpaa_intf->ifid;

		mbuf->nb_segs = 1;
		mbuf->ol_flags = 0;
		mbuf->next = NULL;
		rte_mbuf_refcnt_set(mbuf, 1);
		dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
		dpaa_display_frame_info(fd, fq[0]->fqid, true);
	}
}

void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
	void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));

	/* On LS1046, annotation stashing is disabled because the L2 cache
	 * becomes a bottleneck in multicore scenarios on this platform.
	 * So we prefetch the annotation beforehand, so that it is available
	 * in cache when accessed.
	 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

	*bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
}

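/*
 * Rx path for queues that are statically mapped to a portal: initialize
 * the frame queue's portal on first use, then poll the portal directly
 * for up to nb_bufs frames.
 */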
static uint16_t
dpaa_eth_queue_portal_rx(struct qman_fq *fq,
			 struct rte_mbuf **bufs,
			 uint16_t nb_bufs)
{
	int ret;

	if (unlikely(!fq->qp_initialized)) {
		ret = rte_dpaa_portal_fq_init((void *)0, fq);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal %d", ret);
			return 0;
		}
		fq->qp_initialized = 1;
	}

	return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
}

enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs)
{
	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	struct rte_mbuf *mbuf;
	struct rte_event *ev = (struct rte_event *)event;

	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
	ev->event_ptr = (void *)mbuf;
	ev->flow_id = fq->ev.flow_id;
	ev->sub_event_type = fq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = fq->ev.sched_type;
	ev->queue_id = fq->ev.queue_id;
	ev->priority = fq->ev.priority;
	ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
	*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
	*bufs = mbuf;

	return qman_cb_dqrr_consume;
}

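/*
 * Atomic-queue variant of the eventdev Rx callback: the DQRR entry is not
 * consumed here but held (qman_cb_dqrr_defer) and recorded in per-lcore
 * state; it is released later via DCA when the mbuf is enqueued on Tx
 * (see the QMAN_ENQUEUE_FLAG_DCA handling in dpaa_eth_queue_tx), which
 * preserves atomic ordering per flow.
 */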
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs)
{
	u8 index;
	u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	struct rte_mbuf *mbuf;
	struct rte_event *ev = (struct rte_event *)event;

	mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
	ev->event_ptr = (void *)mbuf;
	ev->flow_id = fq->ev.flow_id;
	ev->sub_event_type = fq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = fq->ev.sched_type;
	ev->queue_id = fq->ev.queue_id;
	ev->priority = fq->ev.priority;

	/* Save active dqrr entries */
	index = DQRR_PTR2IDX(dqrr);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
	ev->impl_opaque = index + 1;
	*dpaa_seqn(mbuf) = (uint32_t)index + 1;
	*bufs = mbuf;

	return qman_cb_dqrr_defer;
}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
static inline void dpaa_eth_err_queue(struct dpaa_if *dpaa_intf)
{
	struct rte_mbuf *mbuf;
	struct qman_fq *debug_fq;
	int ret, i;
	struct qm_dqrr_entry *dq;
	struct qm_fd *fd;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return;
		}
	}
	for (i = 0; i <= DPAA_DEBUG_FQ_TX_ERROR; i++) {
		debug_fq = &dpaa_intf->debug_queues[i];
		ret = qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT);
		if (ret)
			return;

		do {
			dq = qman_dequeue(debug_fq);
			if (!dq)
				continue;
			fd = &dq->fd;
			if (i == DPAA_DEBUG_FQ_RX_ERROR)
				DPAA_PMD_ERR("RX ERROR status: 0x%08x",
					fd->status);
			else
				DPAA_PMD_ERR("TX ERROR status: 0x%08x",
					fd->status);
			dpaa_display_frame_info(fd, debug_fq->fqid,
				i == DPAA_DEBUG_FQ_RX_ERROR);

			mbuf = dpaa_eth_fd_to_mbuf(fd, dpaa_intf->ifid);
			rte_pktmbuf_free(mbuf);
			qman_dqrr_consume(debug_fq, dq);
		} while (debug_fq->flags & QMAN_FQ_STATE_VDQCR);
	}
}
#endif

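/*
 * Main Rx burst function. Static (portal-mapped) queues take the fast
 * portal-poll path; otherwise frames are pulled with a volatile dequeue
 * command (VDQCR) and converted to mbufs one DQRR entry at a time.
 */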
uint16_t dpaa_eth_queue_rx(void *q,
			   struct rte_mbuf **bufs,
			   uint16_t nb_bufs)
{
	struct qman_fq *fq = q;
	struct qm_dqrr_entry *dq;
	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	int num_rx_bufs, ret;
	uint32_t vdqcr_flags = 0;

	if (unlikely(rte_dpaa_bpid_info == NULL &&
				rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa_bpid_info = fq->bp_array;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	if (fq->fqid == ((struct dpaa_if *)fq->dpaa_intf)->rx_queues[0].fqid)
		dpaa_eth_err_queue((struct dpaa_if *)fq->dpaa_intf);
#endif

	if (likely(fq->is_static))
		return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	/* For requests of fewer than four buffers, we set the QM_VDQCR_EXACT
	 * flag and ask for the exact number of buffers. Otherwise the flag is
	 * not set, in which case the hardware may return up to two buffers
	 * more than requested, so we ask for two fewer.
	 */
	if (nb_bufs < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_bufs;
	} else {
		num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		dq = qman_dequeue(fq);
		if (!dq)
			continue;
		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
		dpaa_display_frame_info(&dq->fd, fq->fqid, true);
		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return num_rx;
}

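/*
 * Build a scatter/gather FD for a multi-segment mbuf. A separate buffer
 * ('temp') is allocated from the dpaa pool to hold the SG table; each
 * segment's bpid controls whether hardware may free that buffer (0xff
 * means it must not, e.g. for cloned or external buffers).
 */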
static int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		struct qm_fd *fd,
		struct dpaa_bp_info *bp_info)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
	struct rte_mbuf *temp, *mi;
	struct qm_sg_entry *sg_temp, *sgt;
	int i = 0;

	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

	temp = rte_pktmbuf_alloc(bp_info->mp);
	if (!temp) {
		DPAA_PMD_ERR("Failure in allocation of mbuf");
		return -1;
	}
	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
				+ temp->data_off)) {
		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
		/* Release the SG-table buffer so that it is not leaked */
		rte_pktmbuf_free(temp);
		return -1;
	}

	fd->cmd = 0;
	fd->opaque_addr = 0;

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
		if (!mbuf->packet_type) {
			struct rte_net_hdr_lens hdr_lens;

			mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
					| RTE_PTYPE_L4_MASK);
			mbuf->l2_len = hdr_lens.l2_len;
			mbuf->l3_len = hdr_lens.l3_len;
		}
		if (temp->data_off < DEFAULT_TX_ICEOF
			+ sizeof(struct dpaa_eth_parse_results_t))
			temp->data_off = DEFAULT_TX_ICEOF
				+ sizeof(struct dpaa_eth_parse_results_t);
		dcbz_64(temp->buf_addr);
		dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
	}

	sgt = temp->buf_addr + temp->data_off;
	fd->format = QM_FD_SG;
	fd->addr = temp->buf_iova;
	fd->offset = temp->data_off;
	fd->bpid = bp_info ? bp_info->bpid : 0xff;
	fd->length20 = mbuf->pkt_len;

	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		sg_temp->opaque = 0;
		sg_temp->val = 0;
		sg_temp->addr = cur_seg->buf_iova;
		sg_temp->offset = cur_seg->data_off;
		sg_temp->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				sg_temp->bpid =
					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
			}
			cur_seg = cur_seg->next;
		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
			sg_temp->bpid = 0xff;
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
			} else {
				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
		if (cur_seg == NULL) {
			sg_temp->final = 1;
			cpu_to_hw_sg(sg_temp);
			break;
		}
		cpu_to_hw_sg(sg_temp);
	}
	return 0;
}

/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
			    struct dpaa_bp_info *bp_info,
			    struct qm_fd *fd_arr)
{
	struct rte_mbuf *mi = NULL;

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* In case of direct mbuf and mbuf being cloned,
			 * BMAN should _not_ release buffer.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
			/* Buffer should be released by EAL */
			rte_mbuf_refcnt_update(mbuf, -1);
		} else {
			/* In case of direct mbuf and no cloning, mbuf can be
			 * released by BMAN.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
		DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
				bp_info ? bp_info->bpid : 0xff);
	} else {
		/* This is data-containing core mbuf: 'mi' */
		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1) {
			/* In case of indirect mbuf, and mbuf being cloned,
			 * BMAN should _not_ release it and let EAL release
			 * it through pktmbuf_free below.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
		} else {
			/* In case of indirect mbuf, and no cloning, core mbuf
			 * should be released by BMAN.
			 * Increase refcnt of core mbuf so that when
			 * pktmbuf_free is called and mbuf is released, EAL
			 * doesn't try to release core mbuf which would have
			 * been released by BMAN.
			 */
			rte_mbuf_refcnt_update(mi, 1);
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr,
						bp_info ? bp_info->bpid : 0xff);
		}
		rte_pktmbuf_free(mbuf);
	}

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
		dpaa_unsegmented_checksum(mbuf, fd_arr);
}

/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
		struct dpaa_bp_info *bp_info,
		struct qm_fd *fd_arr)
{
	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

	if (mbuf->nb_segs == 1) {
		/* Case for non-segmented buffers */
		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
	} else if (mbuf->nb_segs > 1 &&
		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info)) {
			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
			return 1;
		}
	} else {
		DPAA_PMD_DEBUG("Number of Segments not supported");
		return 1;
	}

	return 0;
}

/* Handle all mbufs on an external pool (non-dpaa) */
static inline struct rte_mbuf *
reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf)
{
	struct dpaa_if *dpaa_intf = txq->dpaa_intf;
	struct dpaa_bp_info *bp_info = dpaa_intf->bp_info;
	struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0};
	struct rte_mbuf *temp_mbuf;
	int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0;
	uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0;
	char *data;

	DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer");

	mbufs_size = bp_info->size -
		bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM;
	extra_seg = !!(mbuf->pkt_len % mbufs_size);
	num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg;

	ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs);
	if (ret != 0) {
		DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed");
		return NULL;
	}

	temp_mbuf = mbuf;

	while (temp_mbuf) {
		/* If mbuf data is less than new mbuf remaining memory */
		if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) {
			bytes_to_copy = temp_mbuf->data_len - offset1;
			mbuf_greater = -1;
		/* If mbuf data is greater than new mbuf remaining memory */
		} else if ((temp_mbuf->data_len - offset1) >
			   (mbufs_size - offset2)) {
			bytes_to_copy = mbufs_size - offset2;
			mbuf_greater = 1;
		/* if mbuf data is equal to new mbuf remaining memory */
		} else {
			bytes_to_copy = temp_mbuf->data_len - offset1;
			mbuf_greater = 0;
		}

		/* Copy the data from the current source segment */
		data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy);

		rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(temp_mbuf,
			   void *, offset1), bytes_to_copy);

		/* Set new offsets and the temp buffers */
		if (mbuf_greater == -1) {
			offset1 = 0;
			offset2 += bytes_to_copy;
			temp_mbuf = temp_mbuf->next;
		} else if (mbuf_greater == 1) {
			offset2 = 0;
			offset1 += bytes_to_copy;
			new_mbufs[i]->next = new_mbufs[i + 1];
			new_mbufs[0]->nb_segs++;
			i++;
		} else {
			offset1 = 0;
			offset2 = 0;
			temp_mbuf = temp_mbuf->next;
			new_mbufs[i]->next = new_mbufs[i + 1];
			if (new_mbufs[i + 1])
				new_mbufs[0]->nb_segs++;
			i++;
		}
	}

	/* Copy other required fields */
	new_mbufs[0]->ol_flags = mbuf->ol_flags;
	new_mbufs[0]->packet_type = mbuf->packet_type;
	new_mbufs[0]->tx_offload = mbuf->tx_offload;

	rte_pktmbuf_free(mbuf);

	return new_mbufs[0];
}

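/*
 * Tx burst function. The fast path converts a direct, single-segment,
 * refcnt==1 mbuf from a dpaa pool straight into a contiguous FD; other
 * cases (indirect/external buffers, multi-segment frames, non-dpaa pools,
 * or the LS1043A alignment workaround) go through the slower conversion
 * helpers. Frames are enqueued in bursts of up to DPAA_TX_BURST_SIZE.
 */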
uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct rte_mbuf *mbuf, *mi = NULL;
	struct rte_mempool *mp;
	struct dpaa_bp_info *bp_info;
	struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
	uint32_t frames_to_send, loop, sent = 0;
	uint16_t state;
	int ret, realloc_mbuf = 0;
	uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
	struct rte_mbuf **orig_bufs = bufs;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

	while (nb_bufs) {
		frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
				DPAA_TX_BURST_SIZE : nb_bufs;
		for (loop = 0; loop < frames_to_send; loop++) {
			mbuf = *(bufs++);
			/* On LS1043A, FMAN can stall because of an errata
			 * when the data offset is not 128-byte aligned
			 * (the 0x7F mask below), so reallocate the buffer
			 * in that case.
			 */
			if (dpaa_svr_family == SVR_LS1043A_FAMILY &&
					(mbuf->data_off & 0x7F) != 0x0)
				realloc_mbuf = 1;
			seqn = *dpaa_seqn(mbuf);
			if (seqn != DPAA_INVALID_MBUF_SEQN) {
				index = seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					flags[loop] =
					   ((index & QM_EQCR_DCA_IDXMASK) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			if (likely(RTE_MBUF_DIRECT(mbuf))) {
				mp = mbuf->pool;
				bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
				if (likely(mp->ops_index ==
						bp_info->dpaa_ops_index &&
					mbuf->nb_segs == 1 &&
					realloc_mbuf == 0 &&
					rte_mbuf_refcnt_read(mbuf) == 1)) {
					DPAA_MBUF_TO_CONTIG_FD(mbuf,
						&fd_arr[loop], bp_info->bpid);
					if (mbuf->ol_flags &
						DPAA_TX_CKSUM_OFFLOAD_MASK)
						dpaa_unsegmented_checksum(mbuf,
							&fd_arr[loop]);
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(mbuf);
				mp = mi->pool;
			}

			if (unlikely(RTE_MBUF_HAS_EXTBUF(mbuf))) {
				bp_info = NULL;
				goto indirect_buf;
			}

			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
			if (unlikely(mp->ops_index != bp_info->dpaa_ops_index ||
				     realloc_mbuf == 1)) {
				struct rte_mbuf *temp_mbuf;

				temp_mbuf = reallocate_mbuf(q, mbuf);
				if (!temp_mbuf) {
					/* Set frames_to_send & nb_bufs so
					 * that only the frames before this
					 * one are transmitted.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
				mbuf = temp_mbuf;
				realloc_mbuf = 0;
			}
indirect_buf:
			state = tx_on_dpaa_pool(mbuf, bp_info,
						&fd_arr[loop]);
			if (unlikely(state)) {
				/* Set frames_to_send & nb_bufs so
				 * that only the frames before this
				 * one are transmitted.
				 */
				frames_to_send = loop;
				nb_bufs = loop;
				goto send_pkts;
			}
		}

send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi(q, &fd_arr[loop],
						   &flags[loop],
						   frames_to_send - loop);
		}
		nb_bufs -= frames_to_send;
		sent += frames_to_send;
	}

	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);

	loop = 0;
	while (loop < sent) {
		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
			rte_pktmbuf_free(*orig_bufs);
		orig_bufs++;
		loop++;
	}

	return sent;
}

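/*
 * Slow-path Tx variant: first reclaim any frames sitting in the enqueue
 * rejection notification (ERN) ring via qman_ern_poll_free(), then
 * transmit as usual.
 */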
uint16_t
dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	qman_ern_poll_free();

	return dpaa_eth_queue_tx(q, bufs, nb_bufs);
}

uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
			      struct rte_mbuf **bufs __rte_unused,
			      uint16_t nb_bufs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Drop all packets");

	/* Drop all incoming packets. No need to free the packets here:
	 * the rte_eth framework frees them through the tx_buffer callback
	 * when this function returns a count smaller than nb_bufs.
	 */
	return 0;
}