1d81734caSHemant Agrawal /* SPDX-License-Identifier: BSD-3-Clause 237f9b54bSShreyansh Jain * 337f9b54bSShreyansh Jain * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. 4f191d5abSHemant Agrawal * Copyright 2017,2019-2021 NXP 537f9b54bSShreyansh Jain * 637f9b54bSShreyansh Jain */ 737f9b54bSShreyansh Jain 837f9b54bSShreyansh Jain /* System headers */ 937f9b54bSShreyansh Jain #include <inttypes.h> 1037f9b54bSShreyansh Jain #include <unistd.h> 1137f9b54bSShreyansh Jain #include <stdio.h> 1237f9b54bSShreyansh Jain #include <limits.h> 1337f9b54bSShreyansh Jain #include <sched.h> 1437f9b54bSShreyansh Jain #include <pthread.h> 1537f9b54bSShreyansh Jain 1637f9b54bSShreyansh Jain #include <rte_byteorder.h> 1737f9b54bSShreyansh Jain #include <rte_common.h> 1837f9b54bSShreyansh Jain #include <rte_interrupts.h> 1937f9b54bSShreyansh Jain #include <rte_log.h> 2037f9b54bSShreyansh Jain #include <rte_debug.h> 2137f9b54bSShreyansh Jain #include <rte_pci.h> 2237f9b54bSShreyansh Jain #include <rte_atomic.h> 2337f9b54bSShreyansh Jain #include <rte_branch_prediction.h> 2437f9b54bSShreyansh Jain #include <rte_memory.h> 2537f9b54bSShreyansh Jain #include <rte_tailq.h> 2637f9b54bSShreyansh Jain #include <rte_eal.h> 2737f9b54bSShreyansh Jain #include <rte_alarm.h> 2837f9b54bSShreyansh Jain #include <rte_ether.h> 29df96fd0dSBruce Richardson #include <ethdev_driver.h> 3037f9b54bSShreyansh Jain #include <rte_malloc.h> 3137f9b54bSShreyansh Jain #include <rte_ring.h> 3237f9b54bSShreyansh Jain #include <rte_ip.h> 3337f9b54bSShreyansh Jain #include <rte_tcp.h> 3437f9b54bSShreyansh Jain #include <rte_udp.h> 35d565c887SAshish Jain #include <rte_net.h> 365e745593SSunil Kumar Kori #include <rte_eventdev.h> 3737f9b54bSShreyansh Jain 3837f9b54bSShreyansh Jain #include "dpaa_ethdev.h" 3937f9b54bSShreyansh Jain #include "dpaa_rxtx.h" 4037f9b54bSShreyansh Jain #include <rte_dpaa_bus.h> 4137f9b54bSShreyansh Jain #include <dpaa_mempool.h> 4237f9b54bSShreyansh Jain 435e745593SSunil Kumar Kori 
/*
 * NOTE(review): this chunk is `git blame` output (each source line is
 * prefixed with a commit hash and author) of the DPAA PMD datapath file
 * (dpaa_rxtx.c). It is not compilable as-is; the clean source should be
 * restored from version control. Comments below summarize what the
 * blame-annotated text contains; code tokens are left byte-identical.
 *
 * This span holds: the remaining DPAA/QMan includes; the
 * DPAA_MBUF_TO_CONTIG_FD() macro, which fills a contiguous QM frame
 * descriptor from an mbuf (clears cmd/opaque_addr, packs format, data_off
 * and pkt_len into fd->opaque, and stores the buffer IOVA and bpid); the
 * debug-build-only dpaa_display_frame_info(), which returns early for
 * frames with fd->status == 0 and otherwise hex-dumps the FD fields, the
 * RX parse results (rx == true) and, for non-S/G frames, the payload
 * bytes (S/G dump is a TBD stub); the dpaa_slow_parsing() stub; and the
 * opening of dpaa_eth_packet_info() (continues in the next span).
 */
#include <qman.h> 4437f9b54bSShreyansh Jain #include <fsl_usd.h> 4537f9b54bSShreyansh Jain #include <fsl_qman.h> 4637f9b54bSShreyansh Jain #include <fsl_bman.h> 478c83f28cSHemant Agrawal #include <dpaa_of.h> 4837f9b54bSShreyansh Jain #include <netcfg.h> 4937f9b54bSShreyansh Jain 5037f9b54bSShreyansh Jain #define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \ 5137f9b54bSShreyansh Jain do { \ 5237f9b54bSShreyansh Jain (_fd)->cmd = 0; \ 5337f9b54bSShreyansh Jain (_fd)->opaque_addr = 0; \ 5437f9b54bSShreyansh Jain (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \ 5537f9b54bSShreyansh Jain (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \ 5637f9b54bSShreyansh Jain (_fd)->opaque |= (_mbuf)->pkt_len; \ 57455da545SSantosh Shukla (_fd)->addr = (_mbuf)->buf_iova; \ 5837f9b54bSShreyansh Jain (_fd)->bpid = _bpid; \ 5937f9b54bSShreyansh Jain } while (0) 6037f9b54bSShreyansh Jain 6177393f56SSachin Saxena #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 6277393f56SSachin Saxena #define DISPLAY_PRINT printf 6377393f56SSachin Saxena static void dpaa_display_frame_info(const struct qm_fd *fd, 6477393f56SSachin Saxena uint32_t fqid, bool rx) 6505ba55bcSShreyansh Jain { 6605ba55bcSShreyansh Jain int ii; 6705ba55bcSShreyansh Jain char *ptr; 6877393f56SSachin Saxena struct annotations_t *annot = rte_dpaa_mem_ptov(fd->addr); 6977393f56SSachin Saxena uint8_t format; 7005ba55bcSShreyansh Jain 7177393f56SSachin Saxena if (!fd->status) { 7277393f56SSachin Saxena /* Do not display correct packets.*/ 7377393f56SSachin Saxena return; 7405ba55bcSShreyansh Jain } 7577393f56SSachin Saxena 7677393f56SSachin Saxena format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> 7777393f56SSachin Saxena DPAA_FD_FORMAT_SHIFT; 7877393f56SSachin Saxena 7977393f56SSachin Saxena DISPLAY_PRINT("fqid %d bpid %d addr 0x%lx, format %d\r\n", 8077393f56SSachin Saxena fqid, fd->bpid, (unsigned long)fd->addr, fd->format); 8177393f56SSachin Saxena DISPLAY_PRINT("off %d, len %d stat 0x%x\r\n", 8277393f56SSachin Saxena 
fd->offset, fd->length20, fd->status); 8377393f56SSachin Saxena if (rx) { 8477393f56SSachin Saxena ptr = (char *)&annot->parse; 8577393f56SSachin Saxena DISPLAY_PRINT("RX parser result:\r\n"); 8677393f56SSachin Saxena for (ii = 0; ii < (int)sizeof(struct dpaa_eth_parse_results_t); 8777393f56SSachin Saxena ii++) { 8877393f56SSachin Saxena DISPLAY_PRINT("%02x ", ptr[ii]); 8977393f56SSachin Saxena if (((ii + 1) % 16) == 0) 9077393f56SSachin Saxena DISPLAY_PRINT("\n"); 9177393f56SSachin Saxena } 9277393f56SSachin Saxena DISPLAY_PRINT("\n"); 9377393f56SSachin Saxena } 9477393f56SSachin Saxena 9577393f56SSachin Saxena if (unlikely(format == qm_fd_sg)) { 9677393f56SSachin Saxena /*TBD:S/G display: to be implemented*/ 9777393f56SSachin Saxena return; 9877393f56SSachin Saxena } 9977393f56SSachin Saxena 10077393f56SSachin Saxena DISPLAY_PRINT("Frame payload:\r\n"); 10177393f56SSachin Saxena ptr = (char *)annot; 10277393f56SSachin Saxena ptr += fd->offset; 10377393f56SSachin Saxena for (ii = 0; ii < fd->length20; ii++) { 10477393f56SSachin Saxena DISPLAY_PRINT("%02x ", ptr[ii]); 10577393f56SSachin Saxena if (((ii + 1) % 16) == 0) 10605ba55bcSShreyansh Jain printf("\n"); 10705ba55bcSShreyansh Jain } 10877393f56SSachin Saxena DISPLAY_PRINT("\n"); 10977393f56SSachin Saxena } 11005ba55bcSShreyansh Jain #else 11177393f56SSachin Saxena #define dpaa_display_frame_info(a, b, c) 11205ba55bcSShreyansh Jain #endif 11305ba55bcSShreyansh Jain 114a7bdc3bdSShreyansh Jain static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused, 115a7bdc3bdSShreyansh Jain uint64_t prs __rte_unused) 116a7bdc3bdSShreyansh Jain { 117a7bdc3bdSShreyansh Jain DPAA_DP_LOG(DEBUG, "Slow parsing"); 118a7bdc3bdSShreyansh Jain /*TBD:XXX: to be implemented*/ 119a7bdc3bdSShreyansh Jain } 120a7bdc3bdSShreyansh Jain 1210e5607e4SHemant Agrawal static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr) 122a7bdc3bdSShreyansh Jain { 123a7bdc3bdSShreyansh Jain struct annotations_t *annot = 
/*
 * NOTE(review): blame-annotated text (commit hash + author prefixes);
 * code tokens left byte-identical.
 *
 * Body of dpaa_eth_packet_info(): reads the HW parse-result word from the
 * frame annotation area (masked by DPAA_PARSE_MASK), pre-sets ol_flags to
 * RSS_HASH + IP/L4 checksum GOOD, then maps the parse code to
 * m->packet_type via the switch below (ether/IPv4/IPv6, frag, ext-header,
 * TCP/UDP/SCTP combinations). Checksum-error parse codes instead override
 * ol_flags with *_CKSUM_BAD; unknown codes fall through to the
 * dpaa_slow_parsing() stub. Afterwards it fills tx_offload with l2/l3
 * offsets from the parse results, copies the HW hash into m->hash.rss,
 * and sets RTE_MBUF_F_RX_VLAN when the VLAN parse bit is set (VLAN is not
 * stripped). The span ends with the opening of dpaa_checksum(), the
 * software checksum fallback (continues in the next span).
 */
GET_ANNOTATIONS(fd_virt_addr); 1240e5607e4SHemant Agrawal uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK; 125a7bdc3bdSShreyansh Jain 126a7bdc3bdSShreyansh Jain DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot); 127a7bdc3bdSShreyansh Jain 128*daa02b5cSOlivier Matz m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD | 129*daa02b5cSOlivier Matz RTE_MBUF_F_RX_L4_CKSUM_GOOD; 13095d226f0SNipun Gupta 131a7bdc3bdSShreyansh Jain switch (prs) { 132a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4: 133a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 134a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4; 135a7bdc3bdSShreyansh Jain break; 136a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6: 137a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 138a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6; 139a7bdc3bdSShreyansh Jain break; 1409ac71da4SNipun Gupta case DPAA_PKT_TYPE_ETHER: 1419ac71da4SNipun Gupta m->packet_type = RTE_PTYPE_L2_ETHER; 1429ac71da4SNipun Gupta break; 143a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG: 144a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG_UDP: 145a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG_TCP: 146a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG_SCTP: 147a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 148a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG; 149a7bdc3bdSShreyansh Jain break; 150a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG: 151a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG_UDP: 152a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG_TCP: 153a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG_SCTP: 154a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 155a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG; 156a7bdc3bdSShreyansh Jain break; 157a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_EXT: 158a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 159a7bdc3bdSShreyansh Jain 
RTE_PTYPE_L3_IPV4_EXT; 160a7bdc3bdSShreyansh Jain break; 161a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_EXT: 162a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 163a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT; 164a7bdc3bdSShreyansh Jain break; 165a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_TCP: 166a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 167a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; 168a7bdc3bdSShreyansh Jain break; 169a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_TCP: 170a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 171a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; 172a7bdc3bdSShreyansh Jain break; 173a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_UDP: 174a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 175a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; 176a7bdc3bdSShreyansh Jain break; 177a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_UDP: 178a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 179a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; 180a7bdc3bdSShreyansh Jain break; 181a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_EXT_UDP: 182a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 183a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP; 184a7bdc3bdSShreyansh Jain break; 185a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_EXT_UDP: 186a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 187a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP; 188a7bdc3bdSShreyansh Jain break; 189a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_EXT_TCP: 190a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 191a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP; 192a7bdc3bdSShreyansh Jain break; 193a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_EXT_TCP: 194a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 195a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT 
| RTE_PTYPE_L4_TCP; 196a7bdc3bdSShreyansh Jain break; 197a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_SCTP: 198a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 199a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; 200a7bdc3bdSShreyansh Jain break; 201a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_SCTP: 202a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 203a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; 204a7bdc3bdSShreyansh Jain break; 20595d226f0SNipun Gupta case DPAA_PKT_TYPE_IPV4_CSUM_ERR: 20695d226f0SNipun Gupta case DPAA_PKT_TYPE_IPV6_CSUM_ERR: 207*daa02b5cSOlivier Matz m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_BAD; 20895d226f0SNipun Gupta break; 20995d226f0SNipun Gupta case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR: 21095d226f0SNipun Gupta case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR: 21195d226f0SNipun Gupta case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR: 21295d226f0SNipun Gupta case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR: 213*daa02b5cSOlivier Matz m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_L4_CKSUM_BAD; 21495d226f0SNipun Gupta break; 2159ac71da4SNipun Gupta case DPAA_PKT_TYPE_NONE: 2169ac71da4SNipun Gupta m->packet_type = 0; 2179ac71da4SNipun Gupta break; 218a7bdc3bdSShreyansh Jain /* More switch cases can be added */ 219a7bdc3bdSShreyansh Jain default: 220a7bdc3bdSShreyansh Jain dpaa_slow_parsing(m, prs); 221a7bdc3bdSShreyansh Jain } 222a7bdc3bdSShreyansh Jain 223a7bdc3bdSShreyansh Jain m->tx_offload = annot->parse.ip_off[0]; 224a7bdc3bdSShreyansh Jain m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0]) 225a7bdc3bdSShreyansh Jain << DPAA_PKT_L3_LEN_SHIFT; 226a7bdc3bdSShreyansh Jain 227a7bdc3bdSShreyansh Jain /* Set the hash values */ 2289ac71da4SNipun Gupta m->hash.rss = (uint32_t)(annot->hash); 229a7bdc3bdSShreyansh Jain 230a7bdc3bdSShreyansh Jain /* Check if Vlan is present */ 231a7bdc3bdSShreyansh Jain if (prs & DPAA_PARSE_VLAN_MASK) 232*daa02b5cSOlivier Matz m->ol_flags |= 
RTE_MBUF_F_RX_VLAN; 233a7bdc3bdSShreyansh Jain /* Packet received without stripping the vlan */ 234a7bdc3bdSShreyansh Jain } 235a7bdc3bdSShreyansh Jain 2365a8cf1beSShreyansh Jain static inline void dpaa_checksum(struct rte_mbuf *mbuf) 2375a8cf1beSShreyansh Jain { 2386d13ea8eSOlivier Matz struct rte_ether_hdr *eth_hdr = 2396d13ea8eSOlivier Matz rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *); 2405a8cf1beSShreyansh Jain char *l3_hdr = (char *)eth_hdr + mbuf->l2_len; 241a7c528e5SOlivier Matz struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 242a7c528e5SOlivier Matz struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 2435a8cf1beSShreyansh Jain 2445a8cf1beSShreyansh Jain DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf); 2455a8cf1beSShreyansh Jain 2465a8cf1beSShreyansh Jain if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || 2475a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2485a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT)) { 249a7c528e5SOlivier Matz ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 2505a8cf1beSShreyansh Jain ipv4_hdr->hdr_checksum = 0; 2515a8cf1beSShreyansh Jain ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); 2525a8cf1beSShreyansh Jain } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2535a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6) || 2545a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2555a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT)) 256a7c528e5SOlivier Matz ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 2575a8cf1beSShreyansh Jain 2585a8cf1beSShreyansh Jain if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { 259f41b5156SOlivier Matz struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr + 2605a8cf1beSShreyansh Jain mbuf->l3_len); 2615a8cf1beSShreyansh Jain tcp_hdr->cksum = 0; 2620c9da755SDavid Marchand if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) 2635a8cf1beSShreyansh Jain tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, 
/*
 * NOTE(review): blame-annotated text (commit hash + author prefixes);
 * code tokens left byte-identical.
 *
 * Tail of dpaa_checksum(): software TCP/UDP checksum fallback — zeroes
 * the L4 checksum field then computes it with rte_ipv4_udptcp_cksum() or
 * rte_ipv6_udptcp_cksum() depending on the Ethernet ethertype (non-IPv4
 * is assumed IPv6, per the inline comments). Then
 * dpaa_checksum_offload(): fills the TX parse-results area (prs_buf) with
 * L3/L4 parse codes derived from mbuf->packet_type and the l2/l3 lengths,
 * and sets fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC so the hardware
 * computes the checksums. The span ends with the opening of
 * dpaa_unsegmented_checksum() (continues in the next span), which fills
 * in packet_type/l2_len/l3_len via rte_net_get_ptype() when the caller
 * did not set them.
 */
2645a8cf1beSShreyansh Jain tcp_hdr); 2650c9da755SDavid Marchand else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ 2665a8cf1beSShreyansh Jain tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, 2675a8cf1beSShreyansh Jain tcp_hdr); 2685a8cf1beSShreyansh Jain } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == 2695a8cf1beSShreyansh Jain RTE_PTYPE_L4_UDP) { 270e73e3547SOlivier Matz struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr + 2715a8cf1beSShreyansh Jain mbuf->l3_len); 2725a8cf1beSShreyansh Jain udp_hdr->dgram_cksum = 0; 2730c9da755SDavid Marchand if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) 2745a8cf1beSShreyansh Jain udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, 2755a8cf1beSShreyansh Jain udp_hdr); 2760c9da755SDavid Marchand else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ 2775a8cf1beSShreyansh Jain udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, 2785a8cf1beSShreyansh Jain udp_hdr); 2795a8cf1beSShreyansh Jain } 2805a8cf1beSShreyansh Jain } 2815a8cf1beSShreyansh Jain 2825a8cf1beSShreyansh Jain static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf, 2835a8cf1beSShreyansh Jain struct qm_fd *fd, char *prs_buf) 2845a8cf1beSShreyansh Jain { 2855a8cf1beSShreyansh Jain struct dpaa_eth_parse_results_t *prs; 2865a8cf1beSShreyansh Jain 2875a8cf1beSShreyansh Jain DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf); 2885a8cf1beSShreyansh Jain 2895a8cf1beSShreyansh Jain prs = GET_TX_PRS(prs_buf); 2905a8cf1beSShreyansh Jain prs->l3r = 0; 2915a8cf1beSShreyansh Jain prs->l4r = 0; 2925a8cf1beSShreyansh Jain if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || 2935a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2945a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT)) 2955a8cf1beSShreyansh Jain prs->l3r = DPAA_L3_PARSE_RESULT_IPV4; 2965a8cf1beSShreyansh Jain else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2975a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6) || 2985a8cf1beSShreyansh Jain 
((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2995a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT)) 3005a8cf1beSShreyansh Jain prs->l3r = DPAA_L3_PARSE_RESULT_IPV6; 3015a8cf1beSShreyansh Jain 3025a8cf1beSShreyansh Jain if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) 3035a8cf1beSShreyansh Jain prs->l4r = DPAA_L4_PARSE_RESULT_TCP; 3045a8cf1beSShreyansh Jain else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) 3055a8cf1beSShreyansh Jain prs->l4r = DPAA_L4_PARSE_RESULT_UDP; 3065a8cf1beSShreyansh Jain 3075a8cf1beSShreyansh Jain prs->ip_off[0] = mbuf->l2_len; 3085a8cf1beSShreyansh Jain prs->l4_off = mbuf->l3_len + mbuf->l2_len; 3095a8cf1beSShreyansh Jain /* Enable L3 (and L4, if TCP or UDP) HW checksum*/ 3105a8cf1beSShreyansh Jain fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC; 3115a8cf1beSShreyansh Jain } 3125a8cf1beSShreyansh Jain 3135e0789e9SNipun Gupta static inline void 3145e0789e9SNipun Gupta dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr) 3155e0789e9SNipun Gupta { 3165e0789e9SNipun Gupta if (!mbuf->packet_type) { 3175e0789e9SNipun Gupta struct rte_net_hdr_lens hdr_lens; 3185e0789e9SNipun Gupta 3195e0789e9SNipun Gupta mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, 3205e0789e9SNipun Gupta RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK 3215e0789e9SNipun Gupta | RTE_PTYPE_L4_MASK); 3225e0789e9SNipun Gupta mbuf->l2_len = hdr_lens.l2_len; 3235e0789e9SNipun Gupta mbuf->l3_len = hdr_lens.l3_len; 3245e0789e9SNipun Gupta } 3255e0789e9SNipun Gupta if (mbuf->data_off < (DEFAULT_TX_ICEOF + 3265e0789e9SNipun Gupta sizeof(struct dpaa_eth_parse_results_t))) { 3275e0789e9SNipun Gupta DPAA_DP_LOG(DEBUG, "Checksum offload Err: " 3285e0789e9SNipun Gupta "Not enough Headroom " 3295e0789e9SNipun Gupta "space for correct Checksum offload." 
/*
 * NOTE(review): blame-annotated text (commit hash + author prefixes);
 * code tokens left byte-identical.
 *
 * Tail of dpaa_unsegmented_checksum(): if the mbuf headroom is too small
 * for the TX parse-results area (DEFAULT_TX_ICEOF + parse-results size),
 * fall back to software checksumming via dpaa_checksum(); otherwise call
 * dpaa_checksum_offload() against the buffer start. Then
 * dpaa_eth_sg_to_mbuf(): walks a QM scatter/gather table (located at
 * vaddr + fd->offset, entries byte-swapped with hw_sg_to_cpu), rebuilds
 * the mbuf chain by back-computing each segment's mbuf header from its
 * buffer address minus the pool's meta_data_size, accumulates
 * pkt_len/nb_segs on the first segment, stops at the entry marked
 * "final", fills packet info from the annotation area, and frees the
 * standalone buffer that carried the SG table itself; returns NULL when
 * the FD address cannot be converted. Then dpaa_eth_fd_to_mbuf(): the
 * contiguous-frame path (dispatches S/G frames to the function above);
 * the span ends with the opening of dpaa_free_mbuf() (continues in the
 * next span).
 */
3305e0789e9SNipun Gupta "So Calculating checksum in Software."); 3315e0789e9SNipun Gupta dpaa_checksum(mbuf); 3325e0789e9SNipun Gupta } else { 3335e0789e9SNipun Gupta dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr); 3345e0789e9SNipun Gupta } 3355e0789e9SNipun Gupta } 3365e0789e9SNipun Gupta 337f191d5abSHemant Agrawal static struct rte_mbuf * 3389ac71da4SNipun Gupta dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid) 3398cffdcbeSShreyansh Jain { 3408cffdcbeSShreyansh Jain struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 3418cffdcbeSShreyansh Jain struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; 3428cffdcbeSShreyansh Jain struct qm_sg_entry *sgt, *sg_temp; 3438cffdcbeSShreyansh Jain void *vaddr, *sg_vaddr; 3448cffdcbeSShreyansh Jain int i = 0; 345287f4256SNipun Gupta uint16_t fd_offset = fd->offset; 3468cffdcbeSShreyansh Jain 34741c9ee8dSHemant Agrawal vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 3488cffdcbeSShreyansh Jain if (!vaddr) { 3498cffdcbeSShreyansh Jain DPAA_PMD_ERR("unable to convert physical address"); 3508cffdcbeSShreyansh Jain return NULL; 3518cffdcbeSShreyansh Jain } 3528cffdcbeSShreyansh Jain sgt = vaddr + fd_offset; 3538cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 3548cffdcbeSShreyansh Jain hw_sg_to_cpu(sg_temp); 3558cffdcbeSShreyansh Jain temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size); 35641c9ee8dSHemant Agrawal sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp)); 3578cffdcbeSShreyansh Jain 3588cffdcbeSShreyansh Jain first_seg = (struct rte_mbuf *)((char *)sg_vaddr - 3598cffdcbeSShreyansh Jain bp_info->meta_data_size); 3608cffdcbeSShreyansh Jain first_seg->data_off = sg_temp->offset; 3618cffdcbeSShreyansh Jain first_seg->data_len = sg_temp->length; 3628cffdcbeSShreyansh Jain first_seg->pkt_len = sg_temp->length; 3638cffdcbeSShreyansh Jain rte_mbuf_refcnt_set(first_seg, 1); 3648cffdcbeSShreyansh Jain 3658cffdcbeSShreyansh Jain first_seg->port = ifid; 3668cffdcbeSShreyansh Jain 
first_seg->nb_segs = 1; 3678cffdcbeSShreyansh Jain first_seg->ol_flags = 0; 3688cffdcbeSShreyansh Jain prev_seg = first_seg; 3698cffdcbeSShreyansh Jain while (i < DPAA_SGT_MAX_ENTRIES) { 3708cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 3718cffdcbeSShreyansh Jain hw_sg_to_cpu(sg_temp); 37241c9ee8dSHemant Agrawal sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 37341c9ee8dSHemant Agrawal qm_sg_entry_get64(sg_temp)); 3748cffdcbeSShreyansh Jain cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - 3758cffdcbeSShreyansh Jain bp_info->meta_data_size); 3768cffdcbeSShreyansh Jain cur_seg->data_off = sg_temp->offset; 3778cffdcbeSShreyansh Jain cur_seg->data_len = sg_temp->length; 3788cffdcbeSShreyansh Jain first_seg->pkt_len += sg_temp->length; 3798cffdcbeSShreyansh Jain first_seg->nb_segs += 1; 3808cffdcbeSShreyansh Jain rte_mbuf_refcnt_set(cur_seg, 1); 3818cffdcbeSShreyansh Jain prev_seg->next = cur_seg; 3828cffdcbeSShreyansh Jain if (sg_temp->final) { 3838cffdcbeSShreyansh Jain cur_seg->next = NULL; 3848cffdcbeSShreyansh Jain break; 3858cffdcbeSShreyansh Jain } 3868cffdcbeSShreyansh Jain prev_seg = cur_seg; 3878cffdcbeSShreyansh Jain } 38855576ac2SHemant Agrawal DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d", 38955576ac2SHemant Agrawal first_seg->pkt_len, first_seg->nb_segs); 3908cffdcbeSShreyansh Jain 3910e5607e4SHemant Agrawal dpaa_eth_packet_info(first_seg, vaddr); 3928cffdcbeSShreyansh Jain rte_pktmbuf_free_seg(temp); 3938cffdcbeSShreyansh Jain 3948cffdcbeSShreyansh Jain return first_seg; 3958cffdcbeSShreyansh Jain } 3968cffdcbeSShreyansh Jain 3979ac71da4SNipun Gupta static inline struct rte_mbuf * 3989ac71da4SNipun Gupta dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid) 39937f9b54bSShreyansh Jain { 40037f9b54bSShreyansh Jain struct rte_mbuf *mbuf; 4019ac71da4SNipun Gupta struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 40241c9ee8dSHemant Agrawal void *ptr; 4038cffdcbeSShreyansh Jain uint8_t format = 4048cffdcbeSShreyansh Jain (fd->opaque & 
DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; 4059ac71da4SNipun Gupta uint16_t offset; 4069ac71da4SNipun Gupta uint32_t length; 40737f9b54bSShreyansh Jain 4088cffdcbeSShreyansh Jain if (unlikely(format == qm_fd_sg)) 4098cffdcbeSShreyansh Jain return dpaa_eth_sg_to_mbuf(fd, ifid); 4108cffdcbeSShreyansh Jain 4119ac71da4SNipun Gupta offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT; 4129ac71da4SNipun Gupta length = fd->opaque & DPAA_FD_LENGTH_MASK; 4139ac71da4SNipun Gupta 41455576ac2SHemant Agrawal DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length); 41555576ac2SHemant Agrawal 41637f9b54bSShreyansh Jain /* Ignoring case when format != qm_fd_contig */ 4171ee09e39SHemant Agrawal ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 41837f9b54bSShreyansh Jain 41937f9b54bSShreyansh Jain mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 4201ee09e39SHemant Agrawal /* Prefetch the Parse results and packet data to L1 */ 4211ee09e39SHemant Agrawal rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 42237f9b54bSShreyansh Jain 42337f9b54bSShreyansh Jain mbuf->data_off = offset; 42437f9b54bSShreyansh Jain mbuf->data_len = length; 42537f9b54bSShreyansh Jain mbuf->pkt_len = length; 42637f9b54bSShreyansh Jain 42737f9b54bSShreyansh Jain mbuf->port = ifid; 42837f9b54bSShreyansh Jain mbuf->nb_segs = 1; 42937f9b54bSShreyansh Jain mbuf->ol_flags = 0; 43037f9b54bSShreyansh Jain mbuf->next = NULL; 43137f9b54bSShreyansh Jain rte_mbuf_refcnt_set(mbuf, 1); 4320e5607e4SHemant Agrawal dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 43337f9b54bSShreyansh Jain 43437f9b54bSShreyansh Jain return mbuf; 43537f9b54bSShreyansh Jain } 43637f9b54bSShreyansh Jain 4379124e65dSGagandeep Singh uint16_t 4389124e65dSGagandeep Singh dpaa_free_mbuf(const struct qm_fd *fd) 4399124e65dSGagandeep Singh { 4409124e65dSGagandeep Singh struct rte_mbuf *mbuf; 4419124e65dSGagandeep Singh struct dpaa_bp_info *bp_info; 4429124e65dSGagandeep Singh uint8_t format; 
/*
 * NOTE(review): blame-annotated text (commit hash + author prefixes);
 * code tokens left byte-identical.
 *
 * Body of dpaa_free_mbuf(): releases the mbuf(s) backing a frame
 * descriptor. For S/G frames it walks the SG table like
 * dpaa_eth_sg_to_mbuf() (linking segments, stopping at "final"), then
 * frees both the SG-table carrier buffer and the rebuilt chain; returns
 * -1 if the FD address cannot be converted, 0 otherwise. For contiguous
 * frames it back-computes the mbuf from the buffer address and frees it.
 * (NOTE(review): the function returns -1 through a uint16_t return type —
 * callers see 0xffff; worth confirming intent in the clean source.)
 * Then dpaa_rx_cb_no_prefetch(), the LS1043-specific burst RX callback:
 * seeds bufs[0] from dqrr[0], and inside the loop explicitly prefetches
 * the next entry's annotation area before converting the current FD
 * (S/G frames go through dpaa_eth_sg_to_mbuf()). The span ends with the
 * signature of the generic dpaa_rx_cb() (continues in the next span).
 */
4439124e65dSGagandeep Singh void *ptr; 4449124e65dSGagandeep Singh 4459124e65dSGagandeep Singh bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 4469124e65dSGagandeep Singh format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; 4479124e65dSGagandeep Singh if (unlikely(format == qm_fd_sg)) { 4489124e65dSGagandeep Singh struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; 4499124e65dSGagandeep Singh struct qm_sg_entry *sgt, *sg_temp; 4509124e65dSGagandeep Singh void *vaddr, *sg_vaddr; 4519124e65dSGagandeep Singh int i = 0; 4529124e65dSGagandeep Singh uint16_t fd_offset = fd->offset; 4539124e65dSGagandeep Singh 4549124e65dSGagandeep Singh vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 4559124e65dSGagandeep Singh if (!vaddr) { 4569124e65dSGagandeep Singh DPAA_PMD_ERR("unable to convert physical address"); 4579124e65dSGagandeep Singh return -1; 4589124e65dSGagandeep Singh } 4599124e65dSGagandeep Singh sgt = vaddr + fd_offset; 4609124e65dSGagandeep Singh sg_temp = &sgt[i++]; 4619124e65dSGagandeep Singh hw_sg_to_cpu(sg_temp); 4629124e65dSGagandeep Singh temp = (struct rte_mbuf *) 4639124e65dSGagandeep Singh ((char *)vaddr - bp_info->meta_data_size); 4649124e65dSGagandeep Singh sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 4659124e65dSGagandeep Singh qm_sg_entry_get64(sg_temp)); 4669124e65dSGagandeep Singh 4679124e65dSGagandeep Singh first_seg = (struct rte_mbuf *)((char *)sg_vaddr - 4689124e65dSGagandeep Singh bp_info->meta_data_size); 4699124e65dSGagandeep Singh first_seg->nb_segs = 1; 4709124e65dSGagandeep Singh prev_seg = first_seg; 4719124e65dSGagandeep Singh while (i < DPAA_SGT_MAX_ENTRIES) { 4729124e65dSGagandeep Singh sg_temp = &sgt[i++]; 4739124e65dSGagandeep Singh hw_sg_to_cpu(sg_temp); 4749124e65dSGagandeep Singh sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 4759124e65dSGagandeep Singh qm_sg_entry_get64(sg_temp)); 4769124e65dSGagandeep Singh cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - 4779124e65dSGagandeep Singh bp_info->meta_data_size); 
4789124e65dSGagandeep Singh first_seg->nb_segs += 1; 4799124e65dSGagandeep Singh prev_seg->next = cur_seg; 4809124e65dSGagandeep Singh if (sg_temp->final) { 4819124e65dSGagandeep Singh cur_seg->next = NULL; 4829124e65dSGagandeep Singh break; 4839124e65dSGagandeep Singh } 4849124e65dSGagandeep Singh prev_seg = cur_seg; 4859124e65dSGagandeep Singh } 4869124e65dSGagandeep Singh 4879124e65dSGagandeep Singh rte_pktmbuf_free_seg(temp); 4889124e65dSGagandeep Singh rte_pktmbuf_free_seg(first_seg); 4899124e65dSGagandeep Singh return 0; 4909124e65dSGagandeep Singh } 4919124e65dSGagandeep Singh 4929124e65dSGagandeep Singh ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 4939124e65dSGagandeep Singh mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 4949124e65dSGagandeep Singh 4959124e65dSGagandeep Singh rte_pktmbuf_free(mbuf); 4969124e65dSGagandeep Singh 4979124e65dSGagandeep Singh return 0; 4989124e65dSGagandeep Singh } 4999124e65dSGagandeep Singh 50019b4aba2SHemant Agrawal /* Specific for LS1043 */ 501b9083ea5SNipun Gupta void 50219b4aba2SHemant Agrawal dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, 503b9083ea5SNipun Gupta void **bufs, int num_bufs) 5040c504f69SHemant Agrawal { 505b9083ea5SNipun Gupta struct rte_mbuf *mbuf; 506b9083ea5SNipun Gupta struct dpaa_bp_info *bp_info; 507b9083ea5SNipun Gupta const struct qm_fd *fd; 508b9083ea5SNipun Gupta void *ptr; 509b9083ea5SNipun Gupta struct dpaa_if *dpaa_intf; 510b9083ea5SNipun Gupta uint16_t offset, i; 511b9083ea5SNipun Gupta uint32_t length; 512b9083ea5SNipun Gupta uint8_t format; 5130c504f69SHemant Agrawal 514b9083ea5SNipun Gupta bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid); 515b9083ea5SNipun Gupta ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd)); 516b9083ea5SNipun Gupta rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 51719b4aba2SHemant Agrawal bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 518b9083ea5SNipun Gupta 519b9083ea5SNipun Gupta 
for (i = 0; i < num_bufs; i++) { 52019b4aba2SHemant Agrawal if (i < num_bufs - 1) { 521b9083ea5SNipun Gupta bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid); 522b9083ea5SNipun Gupta ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd)); 523b9083ea5SNipun Gupta rte_prefetch0((void *)((uint8_t *)ptr + 524b9083ea5SNipun Gupta DEFAULT_RX_ICEOF)); 525b9083ea5SNipun Gupta bufs[i + 1] = (struct rte_mbuf *)((char *)ptr - 526b9083ea5SNipun Gupta bp_info->meta_data_size); 527b9083ea5SNipun Gupta } 528b9083ea5SNipun Gupta 529b9083ea5SNipun Gupta fd = &dqrr[i]->fd; 5309abdad12SHemant Agrawal dpaa_intf = fq[0]->dpaa_intf; 531b9083ea5SNipun Gupta format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> 532b9083ea5SNipun Gupta DPAA_FD_FORMAT_SHIFT; 533b9083ea5SNipun Gupta if (unlikely(format == qm_fd_sg)) { 534b9083ea5SNipun Gupta bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); 535b9083ea5SNipun Gupta continue; 536b9083ea5SNipun Gupta } 537b9083ea5SNipun Gupta 538b9083ea5SNipun Gupta offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> 539b9083ea5SNipun Gupta DPAA_FD_OFFSET_SHIFT; 540b9083ea5SNipun Gupta length = fd->opaque & DPAA_FD_LENGTH_MASK; 541b9083ea5SNipun Gupta 542b9083ea5SNipun Gupta mbuf = bufs[i]; 543b9083ea5SNipun Gupta mbuf->data_off = offset; 544b9083ea5SNipun Gupta mbuf->data_len = length; 545b9083ea5SNipun Gupta mbuf->pkt_len = length; 546b9083ea5SNipun Gupta mbuf->port = dpaa_intf->ifid; 547b9083ea5SNipun Gupta 548b9083ea5SNipun Gupta mbuf->nb_segs = 1; 549b9083ea5SNipun Gupta mbuf->ol_flags = 0; 550b9083ea5SNipun Gupta mbuf->next = NULL; 551b9083ea5SNipun Gupta rte_mbuf_refcnt_set(mbuf, 1); 5520e5607e4SHemant Agrawal dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 55377393f56SSachin Saxena dpaa_display_frame_info(fd, fq[0]->fqid, true); 554b9083ea5SNipun Gupta } 555b9083ea5SNipun Gupta } 556b9083ea5SNipun Gupta 55719b4aba2SHemant Agrawal void 55819b4aba2SHemant Agrawal dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, 55919b4aba2SHemant Agrawal void **bufs, 
int num_bufs) 56019b4aba2SHemant Agrawal { 56119b4aba2SHemant Agrawal struct rte_mbuf *mbuf; 56219b4aba2SHemant Agrawal const struct qm_fd *fd; 56319b4aba2SHemant Agrawal struct dpaa_if *dpaa_intf; 56419b4aba2SHemant Agrawal uint16_t offset, i; 56519b4aba2SHemant Agrawal uint32_t length; 56619b4aba2SHemant Agrawal uint8_t format; 56719b4aba2SHemant Agrawal 56819b4aba2SHemant Agrawal for (i = 0; i < num_bufs; i++) { 56919b4aba2SHemant Agrawal fd = &dqrr[i]->fd; 57019b4aba2SHemant Agrawal dpaa_intf = fq[0]->dpaa_intf; 57119b4aba2SHemant Agrawal format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> 57219b4aba2SHemant Agrawal DPAA_FD_FORMAT_SHIFT; 57319b4aba2SHemant Agrawal if (unlikely(format == qm_fd_sg)) { 57419b4aba2SHemant Agrawal bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); 57519b4aba2SHemant Agrawal continue; 57619b4aba2SHemant Agrawal } 57719b4aba2SHemant Agrawal 57819b4aba2SHemant Agrawal offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> 57919b4aba2SHemant Agrawal DPAA_FD_OFFSET_SHIFT; 58019b4aba2SHemant Agrawal length = fd->opaque & DPAA_FD_LENGTH_MASK; 58119b4aba2SHemant Agrawal 58219b4aba2SHemant Agrawal mbuf = bufs[i]; 58319b4aba2SHemant Agrawal mbuf->data_off = offset; 58419b4aba2SHemant Agrawal mbuf->data_len = length; 58519b4aba2SHemant Agrawal mbuf->pkt_len = length; 58619b4aba2SHemant Agrawal mbuf->port = dpaa_intf->ifid; 58719b4aba2SHemant Agrawal 58819b4aba2SHemant Agrawal mbuf->nb_segs = 1; 58919b4aba2SHemant Agrawal mbuf->ol_flags = 0; 59019b4aba2SHemant Agrawal mbuf->next = NULL; 59119b4aba2SHemant Agrawal rte_mbuf_refcnt_set(mbuf, 1); 59219b4aba2SHemant Agrawal dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 59377393f56SSachin Saxena dpaa_display_frame_info(fd, fq[0]->fqid, true); 59419b4aba2SHemant Agrawal } 59519b4aba2SHemant Agrawal } 59619b4aba2SHemant Agrawal 597b9083ea5SNipun Gupta void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs) 598b9083ea5SNipun Gupta { 599b9083ea5SNipun Gupta struct dpaa_bp_info *bp_info = 
DPAA_BPID_TO_POOL_INFO(dq->fd.bpid); 600b9083ea5SNipun Gupta void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd)); 601b9083ea5SNipun Gupta 602b9083ea5SNipun Gupta /* In case of LS1046, annotation stashing is disabled due to L2 cache 603b9083ea5SNipun Gupta * being bottleneck in case of multicore scanario for this platform. 604b9083ea5SNipun Gupta * So we prefetch the annoation beforehand, so that it is available 605b9083ea5SNipun Gupta * in cache when accessed. 606b9083ea5SNipun Gupta */ 607b9083ea5SNipun Gupta rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 608b9083ea5SNipun Gupta 609b9083ea5SNipun Gupta *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 6100c504f69SHemant Agrawal } 6110c504f69SHemant Agrawal 6120c504f69SHemant Agrawal static uint16_t 6130c504f69SHemant Agrawal dpaa_eth_queue_portal_rx(struct qman_fq *fq, 6140c504f69SHemant Agrawal struct rte_mbuf **bufs, 6150c504f69SHemant Agrawal uint16_t nb_bufs) 6160c504f69SHemant Agrawal { 6170c504f69SHemant Agrawal int ret; 6180c504f69SHemant Agrawal 619b9c94167SNipun Gupta if (unlikely(!fq->qp_initialized)) { 6200c504f69SHemant Agrawal ret = rte_dpaa_portal_fq_init((void *)0, fq); 6210c504f69SHemant Agrawal if (ret) { 6220c504f69SHemant Agrawal DPAA_PMD_ERR("Failure in affining portal %d", ret); 6230c504f69SHemant Agrawal return 0; 6240c504f69SHemant Agrawal } 625b9c94167SNipun Gupta fq->qp_initialized = 1; 6260c504f69SHemant Agrawal } 6270c504f69SHemant Agrawal 6280c504f69SHemant Agrawal return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp); 6290c504f69SHemant Agrawal } 6300c504f69SHemant Agrawal 6315e745593SSunil Kumar Kori enum qman_cb_dqrr_result 6325e745593SSunil Kumar Kori dpaa_rx_cb_parallel(void *event, 6335e745593SSunil Kumar Kori struct qman_portal *qm __always_unused, 6345e745593SSunil Kumar Kori struct qman_fq *fq, 6355e745593SSunil Kumar Kori const struct qm_dqrr_entry *dqrr, 6365e745593SSunil Kumar Kori void **bufs) 6375e745593SSunil Kumar Kori { 
6385e745593SSunil Kumar Kori u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 6395e745593SSunil Kumar Kori struct rte_mbuf *mbuf; 6405e745593SSunil Kumar Kori struct rte_event *ev = (struct rte_event *)event; 6415e745593SSunil Kumar Kori 6425e745593SSunil Kumar Kori mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); 6435e745593SSunil Kumar Kori ev->event_ptr = (void *)mbuf; 6445e745593SSunil Kumar Kori ev->flow_id = fq->ev.flow_id; 6455e745593SSunil Kumar Kori ev->sub_event_type = fq->ev.sub_event_type; 6465e745593SSunil Kumar Kori ev->event_type = RTE_EVENT_TYPE_ETHDEV; 6475e745593SSunil Kumar Kori ev->op = RTE_EVENT_OP_NEW; 6485e745593SSunil Kumar Kori ev->sched_type = fq->ev.sched_type; 6495e745593SSunil Kumar Kori ev->queue_id = fq->ev.queue_id; 6505e745593SSunil Kumar Kori ev->priority = fq->ev.priority; 6515e745593SSunil Kumar Kori ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN; 652c9a1c2e5SDavid Marchand *dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN; 6535e745593SSunil Kumar Kori *bufs = mbuf; 6545e745593SSunil Kumar Kori 6555e745593SSunil Kumar Kori return qman_cb_dqrr_consume; 6565e745593SSunil Kumar Kori } 6575e745593SSunil Kumar Kori 6585e745593SSunil Kumar Kori enum qman_cb_dqrr_result 6595e745593SSunil Kumar Kori dpaa_rx_cb_atomic(void *event, 6605e745593SSunil Kumar Kori struct qman_portal *qm __always_unused, 6615e745593SSunil Kumar Kori struct qman_fq *fq, 6625e745593SSunil Kumar Kori const struct qm_dqrr_entry *dqrr, 6635e745593SSunil Kumar Kori void **bufs) 6645e745593SSunil Kumar Kori { 6655e745593SSunil Kumar Kori u8 index; 6665e745593SSunil Kumar Kori u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 6675e745593SSunil Kumar Kori struct rte_mbuf *mbuf; 6685e745593SSunil Kumar Kori struct rte_event *ev = (struct rte_event *)event; 6695e745593SSunil Kumar Kori 6705e745593SSunil Kumar Kori mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); 6715e745593SSunil Kumar Kori ev->event_ptr = (void *)mbuf; 6725e745593SSunil Kumar Kori ev->flow_id = fq->ev.flow_id; 
6735e745593SSunil Kumar Kori ev->sub_event_type = fq->ev.sub_event_type; 6745e745593SSunil Kumar Kori ev->event_type = RTE_EVENT_TYPE_ETHDEV; 6755e745593SSunil Kumar Kori ev->op = RTE_EVENT_OP_NEW; 6765e745593SSunil Kumar Kori ev->sched_type = fq->ev.sched_type; 6775e745593SSunil Kumar Kori ev->queue_id = fq->ev.queue_id; 6785e745593SSunil Kumar Kori ev->priority = fq->ev.priority; 6795e745593SSunil Kumar Kori 6805e745593SSunil Kumar Kori /* Save active dqrr entries */ 6815e745593SSunil Kumar Kori index = DQRR_PTR2IDX(dqrr); 6825e745593SSunil Kumar Kori DPAA_PER_LCORE_DQRR_SIZE++; 6835e745593SSunil Kumar Kori DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 6845e745593SSunil Kumar Kori DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf; 6855e745593SSunil Kumar Kori ev->impl_opaque = index + 1; 686c9a1c2e5SDavid Marchand *dpaa_seqn(mbuf) = (uint32_t)index + 1; 6875e745593SSunil Kumar Kori *bufs = mbuf; 6885e745593SSunil Kumar Kori 6895e745593SSunil Kumar Kori return qman_cb_dqrr_defer; 6905e745593SSunil Kumar Kori } 6915e745593SSunil Kumar Kori 69277393f56SSachin Saxena #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 69377393f56SSachin Saxena static inline void dpaa_eth_err_queue(struct dpaa_if *dpaa_intf) 69477393f56SSachin Saxena { 69577393f56SSachin Saxena struct rte_mbuf *mbuf; 69677393f56SSachin Saxena struct qman_fq *debug_fq; 69777393f56SSachin Saxena int ret, i; 69877393f56SSachin Saxena struct qm_dqrr_entry *dq; 69977393f56SSachin Saxena struct qm_fd *fd; 70077393f56SSachin Saxena 70177393f56SSachin Saxena if (unlikely(!RTE_PER_LCORE(dpaa_io))) { 70277393f56SSachin Saxena ret = rte_dpaa_portal_init((void *)0); 70377393f56SSachin Saxena if (ret) { 70477393f56SSachin Saxena DPAA_PMD_ERR("Failure in affining portal"); 70577393f56SSachin Saxena return; 70677393f56SSachin Saxena } 70777393f56SSachin Saxena } 70877393f56SSachin Saxena for (i = 0; i <= DPAA_DEBUG_FQ_TX_ERROR; i++) { 70977393f56SSachin Saxena debug_fq = &dpaa_intf->debug_queues[i]; 71077393f56SSachin Saxena ret = 
qman_set_vdq(debug_fq, 4, QM_VDQCR_EXACT); 71177393f56SSachin Saxena if (ret) 71277393f56SSachin Saxena return; 71377393f56SSachin Saxena 71477393f56SSachin Saxena do { 71577393f56SSachin Saxena dq = qman_dequeue(debug_fq); 71677393f56SSachin Saxena if (!dq) 71777393f56SSachin Saxena continue; 71877393f56SSachin Saxena fd = &dq->fd; 71977393f56SSachin Saxena if (i == DPAA_DEBUG_FQ_RX_ERROR) 72077393f56SSachin Saxena DPAA_PMD_ERR("RX ERROR status: 0x%08x", 72177393f56SSachin Saxena fd->status); 72277393f56SSachin Saxena else 72377393f56SSachin Saxena DPAA_PMD_ERR("TX ERROR status: 0x%08x", 72477393f56SSachin Saxena fd->status); 72577393f56SSachin Saxena dpaa_display_frame_info(fd, debug_fq->fqid, 72677393f56SSachin Saxena i == DPAA_DEBUG_FQ_RX_ERROR); 72777393f56SSachin Saxena 72877393f56SSachin Saxena mbuf = dpaa_eth_fd_to_mbuf(fd, dpaa_intf->ifid); 72977393f56SSachin Saxena rte_pktmbuf_free(mbuf); 73077393f56SSachin Saxena qman_dqrr_consume(debug_fq, dq); 73177393f56SSachin Saxena } while (debug_fq->flags & QMAN_FQ_STATE_VDQCR); 73277393f56SSachin Saxena } 73377393f56SSachin Saxena } 73477393f56SSachin Saxena #endif 73577393f56SSachin Saxena 73637f9b54bSShreyansh Jain uint16_t dpaa_eth_queue_rx(void *q, 73737f9b54bSShreyansh Jain struct rte_mbuf **bufs, 73837f9b54bSShreyansh Jain uint16_t nb_bufs) 73937f9b54bSShreyansh Jain { 74037f9b54bSShreyansh Jain struct qman_fq *fq = q; 74137f9b54bSShreyansh Jain struct qm_dqrr_entry *dq; 74237f9b54bSShreyansh Jain uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 743f40d5a53SNipun Gupta int num_rx_bufs, ret; 744f40d5a53SNipun Gupta uint32_t vdqcr_flags = 0; 74537f9b54bSShreyansh Jain 746e1797f4bSAkhil Goyal if (unlikely(rte_dpaa_bpid_info == NULL && 747e1797f4bSAkhil Goyal rte_eal_process_type() == RTE_PROC_SECONDARY)) 748e1797f4bSAkhil Goyal rte_dpaa_bpid_info = fq->bp_array; 749e1797f4bSAkhil Goyal 75077393f56SSachin Saxena #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER 75177393f56SSachin Saxena if (fq->fqid == 
((struct dpaa_if *)fq->dpaa_intf)->rx_queues[0].fqid) 75277393f56SSachin Saxena dpaa_eth_err_queue((struct dpaa_if *)fq->dpaa_intf); 75377393f56SSachin Saxena #endif 75477393f56SSachin Saxena 7550c504f69SHemant Agrawal if (likely(fq->is_static)) 7560c504f69SHemant Agrawal return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs); 7570c504f69SHemant Agrawal 758e5872221SRohit Raj if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 75937f9b54bSShreyansh Jain ret = rte_dpaa_portal_init((void *)0); 76037f9b54bSShreyansh Jain if (ret) { 76137f9b54bSShreyansh Jain DPAA_PMD_ERR("Failure in affining portal"); 76237f9b54bSShreyansh Jain return 0; 76337f9b54bSShreyansh Jain } 7645d944582SNipun Gupta } 76537f9b54bSShreyansh Jain 766f40d5a53SNipun Gupta /* Until request for four buffers, we provide exact number of buffers. 767f40d5a53SNipun Gupta * Otherwise we do not set the QM_VDQCR_EXACT flag. 768f40d5a53SNipun Gupta * Not setting QM_VDQCR_EXACT flag can provide two more buffers than 769f40d5a53SNipun Gupta * requested, so we request two less in this case. 770f40d5a53SNipun Gupta */ 771f40d5a53SNipun Gupta if (nb_bufs < 4) { 772f40d5a53SNipun Gupta vdqcr_flags = QM_VDQCR_EXACT; 773f40d5a53SNipun Gupta num_rx_bufs = nb_bufs; 774f40d5a53SNipun Gupta } else { 775f40d5a53SNipun Gupta num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ? 
776f40d5a53SNipun Gupta (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2); 777f40d5a53SNipun Gupta } 778f40d5a53SNipun Gupta ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); 77937f9b54bSShreyansh Jain if (ret) 78037f9b54bSShreyansh Jain return 0; 78137f9b54bSShreyansh Jain 78237f9b54bSShreyansh Jain do { 78337f9b54bSShreyansh Jain dq = qman_dequeue(fq); 78437f9b54bSShreyansh Jain if (!dq) 78537f9b54bSShreyansh Jain continue; 78637f9b54bSShreyansh Jain bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid); 78777393f56SSachin Saxena dpaa_display_frame_info(&dq->fd, fq->fqid, true); 78837f9b54bSShreyansh Jain qman_dqrr_consume(fq, dq); 78937f9b54bSShreyansh Jain } while (fq->flags & QMAN_FQ_STATE_VDQCR); 79037f9b54bSShreyansh Jain 79137f9b54bSShreyansh Jain return num_rx; 79237f9b54bSShreyansh Jain } 79337f9b54bSShreyansh Jain 794f191d5abSHemant Agrawal static int 7958cffdcbeSShreyansh Jain dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, 7968cffdcbeSShreyansh Jain struct qm_fd *fd, 797f191d5abSHemant Agrawal struct dpaa_bp_info *bp_info) 7988cffdcbeSShreyansh Jain { 7998cffdcbeSShreyansh Jain struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL; 8008cffdcbeSShreyansh Jain struct rte_mbuf *temp, *mi; 8018cffdcbeSShreyansh Jain struct qm_sg_entry *sg_temp, *sgt; 8028cffdcbeSShreyansh Jain int i = 0; 8038cffdcbeSShreyansh Jain 8048cffdcbeSShreyansh Jain DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit"); 8058cffdcbeSShreyansh Jain 8068cffdcbeSShreyansh Jain temp = rte_pktmbuf_alloc(bp_info->mp); 8078cffdcbeSShreyansh Jain if (!temp) { 8088cffdcbeSShreyansh Jain DPAA_PMD_ERR("Failure in allocation of mbuf"); 8098cffdcbeSShreyansh Jain return -1; 8108cffdcbeSShreyansh Jain } 8118cffdcbeSShreyansh Jain if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry)) 8128cffdcbeSShreyansh Jain + temp->data_off)) { 8138cffdcbeSShreyansh Jain DPAA_PMD_ERR("Insufficient space in mbuf for SG entries"); 8148cffdcbeSShreyansh Jain return -1; 8158cffdcbeSShreyansh Jain } 
8168cffdcbeSShreyansh Jain 8178cffdcbeSShreyansh Jain fd->cmd = 0; 8188cffdcbeSShreyansh Jain fd->opaque_addr = 0; 8198cffdcbeSShreyansh Jain 8208cffdcbeSShreyansh Jain if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) { 821d565c887SAshish Jain if (!mbuf->packet_type) { 822d565c887SAshish Jain struct rte_net_hdr_lens hdr_lens; 823d565c887SAshish Jain 824d565c887SAshish Jain mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, 825d565c887SAshish Jain RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK 826d565c887SAshish Jain | RTE_PTYPE_L4_MASK); 827d565c887SAshish Jain mbuf->l2_len = hdr_lens.l2_len; 828d565c887SAshish Jain mbuf->l3_len = hdr_lens.l3_len; 829d565c887SAshish Jain } 8308cffdcbeSShreyansh Jain if (temp->data_off < DEFAULT_TX_ICEOF 8318cffdcbeSShreyansh Jain + sizeof(struct dpaa_eth_parse_results_t)) 8328cffdcbeSShreyansh Jain temp->data_off = DEFAULT_TX_ICEOF 8338cffdcbeSShreyansh Jain + sizeof(struct dpaa_eth_parse_results_t); 8348cffdcbeSShreyansh Jain dcbz_64(temp->buf_addr); 8358cffdcbeSShreyansh Jain dpaa_checksum_offload(mbuf, fd, temp->buf_addr); 8368cffdcbeSShreyansh Jain } 8378cffdcbeSShreyansh Jain 8388cffdcbeSShreyansh Jain sgt = temp->buf_addr + temp->data_off; 8398cffdcbeSShreyansh Jain fd->format = QM_FD_SG; 840455da545SSantosh Shukla fd->addr = temp->buf_iova; 8418cffdcbeSShreyansh Jain fd->offset = temp->data_off; 842f191d5abSHemant Agrawal fd->bpid = bp_info ? 
bp_info->bpid : 0xff; 8438cffdcbeSShreyansh Jain fd->length20 = mbuf->pkt_len; 8448cffdcbeSShreyansh Jain 8458cffdcbeSShreyansh Jain while (i < DPAA_SGT_MAX_ENTRIES) { 8468cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 8478cffdcbeSShreyansh Jain sg_temp->opaque = 0; 8488cffdcbeSShreyansh Jain sg_temp->val = 0; 849455da545SSantosh Shukla sg_temp->addr = cur_seg->buf_iova; 8508cffdcbeSShreyansh Jain sg_temp->offset = cur_seg->data_off; 8518cffdcbeSShreyansh Jain sg_temp->length = cur_seg->data_len; 8528cffdcbeSShreyansh Jain if (RTE_MBUF_DIRECT(cur_seg)) { 8538cffdcbeSShreyansh Jain if (rte_mbuf_refcnt_read(cur_seg) > 1) { 8548cffdcbeSShreyansh Jain /*If refcnt > 1, invalid bpid is set to ensure 8558cffdcbeSShreyansh Jain * buffer is not freed by HW. 8568cffdcbeSShreyansh Jain */ 8578cffdcbeSShreyansh Jain sg_temp->bpid = 0xff; 8588cffdcbeSShreyansh Jain rte_mbuf_refcnt_update(cur_seg, -1); 8598cffdcbeSShreyansh Jain } else { 8608cffdcbeSShreyansh Jain sg_temp->bpid = 8618cffdcbeSShreyansh Jain DPAA_MEMPOOL_TO_BPID(cur_seg->pool); 8628cffdcbeSShreyansh Jain } 8638cffdcbeSShreyansh Jain cur_seg = cur_seg->next; 864f191d5abSHemant Agrawal } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) { 865f191d5abSHemant Agrawal sg_temp->bpid = 0xff; 866f191d5abSHemant Agrawal cur_seg = cur_seg->next; 8678cffdcbeSShreyansh Jain } else { 8688cffdcbeSShreyansh Jain /* Get owner MBUF from indirect buffer */ 8698cffdcbeSShreyansh Jain mi = rte_mbuf_from_indirect(cur_seg); 8708cffdcbeSShreyansh Jain if (rte_mbuf_refcnt_read(mi) > 1) { 8718cffdcbeSShreyansh Jain /*If refcnt > 1, invalid bpid is set to ensure 8728cffdcbeSShreyansh Jain * owner buffer is not freed by HW. 
8738cffdcbeSShreyansh Jain */ 8748cffdcbeSShreyansh Jain sg_temp->bpid = 0xff; 8758cffdcbeSShreyansh Jain } else { 8768cffdcbeSShreyansh Jain sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool); 8778cffdcbeSShreyansh Jain rte_mbuf_refcnt_update(mi, 1); 8788cffdcbeSShreyansh Jain } 8798cffdcbeSShreyansh Jain prev_seg = cur_seg; 8808cffdcbeSShreyansh Jain cur_seg = cur_seg->next; 8818cffdcbeSShreyansh Jain prev_seg->next = NULL; 8828cffdcbeSShreyansh Jain rte_pktmbuf_free(prev_seg); 8838cffdcbeSShreyansh Jain } 8848cffdcbeSShreyansh Jain if (cur_seg == NULL) { 8858cffdcbeSShreyansh Jain sg_temp->final = 1; 8868cffdcbeSShreyansh Jain cpu_to_hw_sg(sg_temp); 8878cffdcbeSShreyansh Jain break; 8888cffdcbeSShreyansh Jain } 8898cffdcbeSShreyansh Jain cpu_to_hw_sg(sg_temp); 8908cffdcbeSShreyansh Jain } 8918cffdcbeSShreyansh Jain return 0; 8928cffdcbeSShreyansh Jain } 8938cffdcbeSShreyansh Jain 89437f9b54bSShreyansh Jain /* Handle mbufs which are not segmented (non SG) */ 89537f9b54bSShreyansh Jain static inline void 89637f9b54bSShreyansh Jain tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, 89737f9b54bSShreyansh Jain struct dpaa_bp_info *bp_info, 89837f9b54bSShreyansh Jain struct qm_fd *fd_arr) 89937f9b54bSShreyansh Jain { 90037f9b54bSShreyansh Jain struct rte_mbuf *mi = NULL; 90137f9b54bSShreyansh Jain 90237f9b54bSShreyansh Jain if (RTE_MBUF_DIRECT(mbuf)) { 90337f9b54bSShreyansh Jain if (rte_mbuf_refcnt_read(mbuf) > 1) { 90437f9b54bSShreyansh Jain /* In case of direct mbuf and mbuf being cloned, 90537f9b54bSShreyansh Jain * BMAN should _not_ release buffer. 90637f9b54bSShreyansh Jain */ 90737f9b54bSShreyansh Jain DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); 90837f9b54bSShreyansh Jain /* Buffer should be releasd by EAL */ 90937f9b54bSShreyansh Jain rte_mbuf_refcnt_update(mbuf, -1); 91037f9b54bSShreyansh Jain } else { 91137f9b54bSShreyansh Jain /* In case of direct mbuf and no cloning, mbuf can be 91237f9b54bSShreyansh Jain * released by BMAN. 
91337f9b54bSShreyansh Jain */ 91437f9b54bSShreyansh Jain DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid); 91537f9b54bSShreyansh Jain } 916f191d5abSHemant Agrawal } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) { 917f191d5abSHemant Agrawal DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 918f191d5abSHemant Agrawal bp_info ? bp_info->bpid : 0xff); 91937f9b54bSShreyansh Jain } else { 92037f9b54bSShreyansh Jain /* This is data-containing core mbuf: 'mi' */ 92137f9b54bSShreyansh Jain mi = rte_mbuf_from_indirect(mbuf); 92237f9b54bSShreyansh Jain if (rte_mbuf_refcnt_read(mi) > 1) { 92337f9b54bSShreyansh Jain /* In case of indirect mbuf, and mbuf being cloned, 92437f9b54bSShreyansh Jain * BMAN should _not_ release it and let EAL release 92537f9b54bSShreyansh Jain * it through pktmbuf_free below. 92637f9b54bSShreyansh Jain */ 92737f9b54bSShreyansh Jain DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); 92837f9b54bSShreyansh Jain } else { 92937f9b54bSShreyansh Jain /* In case of indirect mbuf, and no cloning, core mbuf 93037f9b54bSShreyansh Jain * should be released by BMAN. 93137f9b54bSShreyansh Jain * Increate refcnt of core mbuf so that when 93237f9b54bSShreyansh Jain * pktmbuf_free is called and mbuf is released, EAL 93337f9b54bSShreyansh Jain * doesn't try to release core mbuf which would have 93437f9b54bSShreyansh Jain * been released by BMAN. 93537f9b54bSShreyansh Jain */ 93637f9b54bSShreyansh Jain rte_mbuf_refcnt_update(mi, 1); 937f191d5abSHemant Agrawal DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 938f191d5abSHemant Agrawal bp_info ? 
bp_info->bpid : 0xff); 93937f9b54bSShreyansh Jain } 94037f9b54bSShreyansh Jain rte_pktmbuf_free(mbuf); 94137f9b54bSShreyansh Jain } 9425a8cf1beSShreyansh Jain 9435e0789e9SNipun Gupta if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) 9445e0789e9SNipun Gupta dpaa_unsegmented_checksum(mbuf, fd_arr); 94537f9b54bSShreyansh Jain } 94637f9b54bSShreyansh Jain 94737f9b54bSShreyansh Jain /* Handle all mbufs on dpaa BMAN managed pool */ 94837f9b54bSShreyansh Jain static inline uint16_t 94937f9b54bSShreyansh Jain tx_on_dpaa_pool(struct rte_mbuf *mbuf, 95037f9b54bSShreyansh Jain struct dpaa_bp_info *bp_info, 95137f9b54bSShreyansh Jain struct qm_fd *fd_arr) 95237f9b54bSShreyansh Jain { 95337f9b54bSShreyansh Jain DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf); 95437f9b54bSShreyansh Jain 95537f9b54bSShreyansh Jain if (mbuf->nb_segs == 1) { 95637f9b54bSShreyansh Jain /* Case for non-segmented buffers */ 95737f9b54bSShreyansh Jain tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr); 9588cffdcbeSShreyansh Jain } else if (mbuf->nb_segs > 1 && 9598cffdcbeSShreyansh Jain mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) { 960f191d5abSHemant Agrawal if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info)) { 9618cffdcbeSShreyansh Jain DPAA_PMD_DEBUG("Unable to create Scatter Gather FD"); 9628cffdcbeSShreyansh Jain return 1; 9638cffdcbeSShreyansh Jain } 96437f9b54bSShreyansh Jain } else { 96537f9b54bSShreyansh Jain DPAA_PMD_DEBUG("Number of Segments not supported"); 96637f9b54bSShreyansh Jain return 1; 96737f9b54bSShreyansh Jain } 96837f9b54bSShreyansh Jain 96937f9b54bSShreyansh Jain return 0; 97037f9b54bSShreyansh Jain } 97137f9b54bSShreyansh Jain 97237f9b54bSShreyansh Jain /* Handle all mbufs on an external pool (non-dpaa) */ 973f8c7a17aSNipun Gupta static inline struct rte_mbuf * 974f8c7a17aSNipun Gupta reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf) 97537f9b54bSShreyansh Jain { 97637f9b54bSShreyansh Jain struct dpaa_if *dpaa_intf = txq->dpaa_intf; 977f8c7a17aSNipun Gupta struct 
dpaa_bp_info *bp_info = dpaa_intf->bp_info; 978f8c7a17aSNipun Gupta struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0}; 979f8c7a17aSNipun Gupta struct rte_mbuf *temp_mbuf; 980f8c7a17aSNipun Gupta int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0; 981f8c7a17aSNipun Gupta uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0; 982f8c7a17aSNipun Gupta char *data; 98337f9b54bSShreyansh Jain 984f8c7a17aSNipun Gupta DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer"); 985f8c7a17aSNipun Gupta 986f8c7a17aSNipun Gupta mbufs_size = bp_info->size - 987f8c7a17aSNipun Gupta bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM; 988f8c7a17aSNipun Gupta extra_seg = !!(mbuf->pkt_len % mbufs_size); 989f8c7a17aSNipun Gupta num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg; 990f8c7a17aSNipun Gupta 991f8c7a17aSNipun Gupta ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs); 992f8c7a17aSNipun Gupta if (ret != 0) { 993f8c7a17aSNipun Gupta DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed"); 994f8c7a17aSNipun Gupta return NULL; 99537f9b54bSShreyansh Jain } 99637f9b54bSShreyansh Jain 997f8c7a17aSNipun Gupta temp_mbuf = mbuf; 99837f9b54bSShreyansh Jain 999f8c7a17aSNipun Gupta while (temp_mbuf) { 1000f8c7a17aSNipun Gupta /* If mbuf data is less than new mbuf remaining memory */ 1001f8c7a17aSNipun Gupta if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) { 1002f8c7a17aSNipun Gupta bytes_to_copy = temp_mbuf->data_len - offset1; 1003f8c7a17aSNipun Gupta mbuf_greater = -1; 1004f8c7a17aSNipun Gupta /* If mbuf data is greater than new mbuf remaining memory */ 1005f8c7a17aSNipun Gupta } else if ((temp_mbuf->data_len - offset1) > 1006f8c7a17aSNipun Gupta (mbufs_size - offset2)) { 1007f8c7a17aSNipun Gupta bytes_to_copy = mbufs_size - offset2; 1008f8c7a17aSNipun Gupta mbuf_greater = 1; 1009f8c7a17aSNipun Gupta /* if mbuf data is equal to new mbuf remaining memory */ 1010f8c7a17aSNipun Gupta } else { 1011f8c7a17aSNipun Gupta bytes_to_copy = 
temp_mbuf->data_len - offset1; 1012f8c7a17aSNipun Gupta mbuf_greater = 0; 1013f8c7a17aSNipun Gupta } 1014f8c7a17aSNipun Gupta 1015f8c7a17aSNipun Gupta /* Copy the data */ 1016f8c7a17aSNipun Gupta data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy); 1017f8c7a17aSNipun Gupta 1018f8c7a17aSNipun Gupta rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(mbuf, 1019f8c7a17aSNipun Gupta void *, offset1), bytes_to_copy); 1020f8c7a17aSNipun Gupta 1021f8c7a17aSNipun Gupta /* Set new offsets and the temp buffers */ 1022f8c7a17aSNipun Gupta if (mbuf_greater == -1) { 1023f8c7a17aSNipun Gupta offset1 = 0; 1024f8c7a17aSNipun Gupta offset2 += bytes_to_copy; 1025f8c7a17aSNipun Gupta temp_mbuf = temp_mbuf->next; 1026f8c7a17aSNipun Gupta } else if (mbuf_greater == 1) { 1027f8c7a17aSNipun Gupta offset2 = 0; 1028f8c7a17aSNipun Gupta offset1 += bytes_to_copy; 1029f8c7a17aSNipun Gupta new_mbufs[i]->next = new_mbufs[i + 1]; 1030f8c7a17aSNipun Gupta new_mbufs[0]->nb_segs++; 1031f8c7a17aSNipun Gupta i++; 1032f8c7a17aSNipun Gupta } else { 1033f8c7a17aSNipun Gupta offset1 = 0; 1034f8c7a17aSNipun Gupta offset2 = 0; 1035f8c7a17aSNipun Gupta temp_mbuf = temp_mbuf->next; 1036f8c7a17aSNipun Gupta new_mbufs[i]->next = new_mbufs[i + 1]; 1037f8c7a17aSNipun Gupta if (new_mbufs[i + 1]) 1038f8c7a17aSNipun Gupta new_mbufs[0]->nb_segs++; 1039f8c7a17aSNipun Gupta i++; 1040f8c7a17aSNipun Gupta } 1041f8c7a17aSNipun Gupta } 1042f8c7a17aSNipun Gupta 1043f8c7a17aSNipun Gupta /* Copy other required fields */ 1044f8c7a17aSNipun Gupta new_mbufs[0]->ol_flags = mbuf->ol_flags; 1045f8c7a17aSNipun Gupta new_mbufs[0]->packet_type = mbuf->packet_type; 1046f8c7a17aSNipun Gupta new_mbufs[0]->tx_offload = mbuf->tx_offload; 1047f8c7a17aSNipun Gupta 1048f8c7a17aSNipun Gupta rte_pktmbuf_free(mbuf); 1049f8c7a17aSNipun Gupta 1050f8c7a17aSNipun Gupta return new_mbufs[0]; 105137f9b54bSShreyansh Jain } 105237f9b54bSShreyansh Jain 105337f9b54bSShreyansh Jain uint16_t 105437f9b54bSShreyansh Jain dpaa_eth_queue_tx(void *q, struct 
rte_mbuf **bufs, uint16_t nb_bufs) 105537f9b54bSShreyansh Jain { 105637f9b54bSShreyansh Jain struct rte_mbuf *mbuf, *mi = NULL; 105737f9b54bSShreyansh Jain struct rte_mempool *mp; 105837f9b54bSShreyansh Jain struct dpaa_bp_info *bp_info; 1059b0a87fe2SNipun Gupta struct qm_fd fd_arr[DPAA_TX_BURST_SIZE]; 10605e0789e9SNipun Gupta uint32_t frames_to_send, loop, sent = 0; 106137f9b54bSShreyansh Jain uint16_t state; 1062f8c7a17aSNipun Gupta int ret, realloc_mbuf = 0; 10635e745593SSunil Kumar Kori uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0}; 1064f191d5abSHemant Agrawal struct rte_mbuf **orig_bufs = bufs; 106537f9b54bSShreyansh Jain 1066e5872221SRohit Raj if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 106737f9b54bSShreyansh Jain ret = rte_dpaa_portal_init((void *)0); 106837f9b54bSShreyansh Jain if (ret) { 106937f9b54bSShreyansh Jain DPAA_PMD_ERR("Failure in affining portal"); 107037f9b54bSShreyansh Jain return 0; 107137f9b54bSShreyansh Jain } 10725d944582SNipun Gupta } 107337f9b54bSShreyansh Jain 107437f9b54bSShreyansh Jain DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q); 107537f9b54bSShreyansh Jain 107637f9b54bSShreyansh Jain while (nb_bufs) { 1077b0a87fe2SNipun Gupta frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ? 1078b0a87fe2SNipun Gupta DPAA_TX_BURST_SIZE : nb_bufs; 10795e0789e9SNipun Gupta for (loop = 0; loop < frames_to_send; loop++) { 10805e0789e9SNipun Gupta mbuf = *(bufs++); 1081f8c7a17aSNipun Gupta /* In case the data offset is not multiple of 16, 1082f8c7a17aSNipun Gupta * FMAN can stall because of an errata. So reallocate 1083f8c7a17aSNipun Gupta * the buffer in such case. 
1084f8c7a17aSNipun Gupta */ 1085f8c7a17aSNipun Gupta if (dpaa_svr_family == SVR_LS1043A_FAMILY && 108659267d7bSNipun Gupta (mbuf->data_off & 0x7F) != 0x0) 1087f8c7a17aSNipun Gupta realloc_mbuf = 1; 1088c9a1c2e5SDavid Marchand seqn = *dpaa_seqn(mbuf); 10899afce5aaSSunil Kumar Kori if (seqn != DPAA_INVALID_MBUF_SEQN) { 10909afce5aaSSunil Kumar Kori index = seqn - 1; 10919afce5aaSSunil Kumar Kori if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { 10929afce5aaSSunil Kumar Kori flags[loop] = 10939afce5aaSSunil Kumar Kori ((index & QM_EQCR_DCA_IDXMASK) << 8); 10949afce5aaSSunil Kumar Kori flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; 10959afce5aaSSunil Kumar Kori DPAA_PER_LCORE_DQRR_SIZE--; 10969afce5aaSSunil Kumar Kori DPAA_PER_LCORE_DQRR_HELD &= 10979afce5aaSSunil Kumar Kori ~(1 << index); 10989afce5aaSSunil Kumar Kori } 10999afce5aaSSunil Kumar Kori } 11009afce5aaSSunil Kumar Kori 11015e0789e9SNipun Gupta if (likely(RTE_MBUF_DIRECT(mbuf))) { 110237f9b54bSShreyansh Jain mp = mbuf->pool; 11035e0789e9SNipun Gupta bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 11045e0789e9SNipun Gupta if (likely(mp->ops_index == 11055e0789e9SNipun Gupta bp_info->dpaa_ops_index && 11065e0789e9SNipun Gupta mbuf->nb_segs == 1 && 1107f8c7a17aSNipun Gupta realloc_mbuf == 0 && 11085e0789e9SNipun Gupta rte_mbuf_refcnt_read(mbuf) == 1)) { 11095e0789e9SNipun Gupta DPAA_MBUF_TO_CONTIG_FD(mbuf, 11105e0789e9SNipun Gupta &fd_arr[loop], bp_info->bpid); 11115e0789e9SNipun Gupta if (mbuf->ol_flags & 11125e0789e9SNipun Gupta DPAA_TX_CKSUM_OFFLOAD_MASK) 11135e0789e9SNipun Gupta dpaa_unsegmented_checksum(mbuf, 11145e0789e9SNipun Gupta &fd_arr[loop]); 11155e0789e9SNipun Gupta continue; 11165e0789e9SNipun Gupta } 111737f9b54bSShreyansh Jain } else { 111837f9b54bSShreyansh Jain mi = rte_mbuf_from_indirect(mbuf); 111937f9b54bSShreyansh Jain mp = mi->pool; 112037f9b54bSShreyansh Jain } 112137f9b54bSShreyansh Jain 1122f191d5abSHemant Agrawal if (unlikely(RTE_MBUF_HAS_EXTBUF(mbuf))) { 1123f191d5abSHemant Agrawal bp_info = NULL; 
1124f191d5abSHemant Agrawal goto indirect_buf; 1125f191d5abSHemant Agrawal } 1126f191d5abSHemant Agrawal 112737f9b54bSShreyansh Jain bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 1128f8c7a17aSNipun Gupta if (unlikely(mp->ops_index != bp_info->dpaa_ops_index || 1129f8c7a17aSNipun Gupta realloc_mbuf == 1)) { 1130f8c7a17aSNipun Gupta struct rte_mbuf *temp_mbuf; 1131f8c7a17aSNipun Gupta 1132f8c7a17aSNipun Gupta temp_mbuf = reallocate_mbuf(q, mbuf); 1133f8c7a17aSNipun Gupta if (!temp_mbuf) { 1134f8c7a17aSNipun Gupta /* Set frames_to_send & nb_bufs so 1135f8c7a17aSNipun Gupta * that packets are transmitted till 1136f8c7a17aSNipun Gupta * previous frame. 1137f8c7a17aSNipun Gupta */ 1138f8c7a17aSNipun Gupta frames_to_send = loop; 1139f8c7a17aSNipun Gupta nb_bufs = loop; 1140f8c7a17aSNipun Gupta goto send_pkts; 1141f8c7a17aSNipun Gupta } 1142f8c7a17aSNipun Gupta mbuf = temp_mbuf; 1143f8c7a17aSNipun Gupta realloc_mbuf = 0; 1144f8c7a17aSNipun Gupta } 1145f191d5abSHemant Agrawal indirect_buf: 114637f9b54bSShreyansh Jain state = tx_on_dpaa_pool(mbuf, bp_info, 114737f9b54bSShreyansh Jain &fd_arr[loop]); 114837f9b54bSShreyansh Jain if (unlikely(state)) { 114937f9b54bSShreyansh Jain /* Set frames_to_send & nb_bufs so 115037f9b54bSShreyansh Jain * that packets are transmitted till 115137f9b54bSShreyansh Jain * previous frame. 
115237f9b54bSShreyansh Jain */ 115337f9b54bSShreyansh Jain frames_to_send = loop; 115437f9b54bSShreyansh Jain nb_bufs = loop; 115537f9b54bSShreyansh Jain goto send_pkts; 115637f9b54bSShreyansh Jain } 115737f9b54bSShreyansh Jain } 115837f9b54bSShreyansh Jain 115937f9b54bSShreyansh Jain send_pkts: 116037f9b54bSShreyansh Jain loop = 0; 116137f9b54bSShreyansh Jain while (loop < frames_to_send) { 116237f9b54bSShreyansh Jain loop += qman_enqueue_multi(q, &fd_arr[loop], 11635e745593SSunil Kumar Kori &flags[loop], 116437f9b54bSShreyansh Jain frames_to_send - loop); 116537f9b54bSShreyansh Jain } 116637f9b54bSShreyansh Jain nb_bufs -= frames_to_send; 11675e0789e9SNipun Gupta sent += frames_to_send; 116837f9b54bSShreyansh Jain } 116937f9b54bSShreyansh Jain 11705e0789e9SNipun Gupta DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q); 117137f9b54bSShreyansh Jain 1172f191d5abSHemant Agrawal 1173f191d5abSHemant Agrawal loop = 0; 1174f191d5abSHemant Agrawal while (loop < sent) { 1175f191d5abSHemant Agrawal if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs))) 1176f191d5abSHemant Agrawal rte_pktmbuf_free(*orig_bufs); 1177f191d5abSHemant Agrawal orig_bufs++; 1178f191d5abSHemant Agrawal loop++; 1179f191d5abSHemant Agrawal } 1180f191d5abSHemant Agrawal 11815e0789e9SNipun Gupta return sent; 118237f9b54bSShreyansh Jain } 118337f9b54bSShreyansh Jain 11849124e65dSGagandeep Singh uint16_t 11859124e65dSGagandeep Singh dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) 11869124e65dSGagandeep Singh { 11879124e65dSGagandeep Singh qman_ern_poll_free(); 11889124e65dSGagandeep Singh 11899124e65dSGagandeep Singh return dpaa_eth_queue_tx(q, bufs, nb_bufs); 11909124e65dSGagandeep Singh } 11919124e65dSGagandeep Singh 119237f9b54bSShreyansh Jain uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused, 119337f9b54bSShreyansh Jain struct rte_mbuf **bufs __rte_unused, 119437f9b54bSShreyansh Jain uint16_t nb_bufs __rte_unused) 119537f9b54bSShreyansh Jain { 119637f9b54bSShreyansh 
Jain DPAA_DP_LOG(DEBUG, "Drop all packets"); 119737f9b54bSShreyansh Jain 119837f9b54bSShreyansh Jain /* Drop all incoming packets. No need to free packets here 119937f9b54bSShreyansh Jain * because the rte_eth f/w frees up the packets through tx_buffer 120037f9b54bSShreyansh Jain * callback in case this functions returns count less than nb_bufs 120137f9b54bSShreyansh Jain */ 120237f9b54bSShreyansh Jain return 0; 120337f9b54bSShreyansh Jain } 1204