1d81734caSHemant Agrawal /* SPDX-License-Identifier: BSD-3-Clause 237f9b54bSShreyansh Jain * 337f9b54bSShreyansh Jain * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. 4e1797f4bSAkhil Goyal * Copyright 2017,2019 NXP 537f9b54bSShreyansh Jain * 637f9b54bSShreyansh Jain */ 737f9b54bSShreyansh Jain 837f9b54bSShreyansh Jain /* System headers */ 937f9b54bSShreyansh Jain #include <inttypes.h> 1037f9b54bSShreyansh Jain #include <unistd.h> 1137f9b54bSShreyansh Jain #include <stdio.h> 1237f9b54bSShreyansh Jain #include <limits.h> 1337f9b54bSShreyansh Jain #include <sched.h> 1437f9b54bSShreyansh Jain #include <pthread.h> 1537f9b54bSShreyansh Jain 1637f9b54bSShreyansh Jain #include <rte_byteorder.h> 1737f9b54bSShreyansh Jain #include <rte_common.h> 1837f9b54bSShreyansh Jain #include <rte_interrupts.h> 1937f9b54bSShreyansh Jain #include <rte_log.h> 2037f9b54bSShreyansh Jain #include <rte_debug.h> 2137f9b54bSShreyansh Jain #include <rte_pci.h> 2237f9b54bSShreyansh Jain #include <rte_atomic.h> 2337f9b54bSShreyansh Jain #include <rte_branch_prediction.h> 2437f9b54bSShreyansh Jain #include <rte_memory.h> 2537f9b54bSShreyansh Jain #include <rte_tailq.h> 2637f9b54bSShreyansh Jain #include <rte_eal.h> 2737f9b54bSShreyansh Jain #include <rte_alarm.h> 2837f9b54bSShreyansh Jain #include <rte_ether.h> 29ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 3037f9b54bSShreyansh Jain #include <rte_malloc.h> 3137f9b54bSShreyansh Jain #include <rte_ring.h> 3237f9b54bSShreyansh Jain #include <rte_ip.h> 3337f9b54bSShreyansh Jain #include <rte_tcp.h> 3437f9b54bSShreyansh Jain #include <rte_udp.h> 35d565c887SAshish Jain #include <rte_net.h> 365e745593SSunil Kumar Kori #include <rte_eventdev.h> 3737f9b54bSShreyansh Jain 3837f9b54bSShreyansh Jain #include "dpaa_ethdev.h" 3937f9b54bSShreyansh Jain #include "dpaa_rxtx.h" 4037f9b54bSShreyansh Jain #include <rte_dpaa_bus.h> 4137f9b54bSShreyansh Jain #include <dpaa_mempool.h> 4237f9b54bSShreyansh Jain 435e745593SSunil Kumar Kori #include <qman.h> 4437f9b54bSShreyansh Jain #include <fsl_usd.h> 4537f9b54bSShreyansh Jain #include <fsl_qman.h> 4637f9b54bSShreyansh Jain #include <fsl_bman.h> 478c83f28cSHemant Agrawal #include <dpaa_of.h> 4837f9b54bSShreyansh Jain #include <netcfg.h> 4937f9b54bSShreyansh Jain 5037f9b54bSShreyansh Jain #define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \ 5137f9b54bSShreyansh Jain do { \ 5237f9b54bSShreyansh Jain (_fd)->cmd = 0; \ 5337f9b54bSShreyansh Jain (_fd)->opaque_addr = 0; \ 5437f9b54bSShreyansh Jain (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \ 5537f9b54bSShreyansh Jain (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \ 5637f9b54bSShreyansh Jain (_fd)->opaque |= (_mbuf)->pkt_len; \ 57455da545SSantosh Shukla (_fd)->addr = (_mbuf)->buf_iova; \ 5837f9b54bSShreyansh Jain (_fd)->bpid = _bpid; \ 5937f9b54bSShreyansh Jain } while (0) 6037f9b54bSShreyansh Jain 6105ba55bcSShreyansh Jain #if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER) 620e5607e4SHemant Agrawal static void dpaa_display_frame(const struct qm_fd *fd) 6305ba55bcSShreyansh Jain { 6405ba55bcSShreyansh Jain int ii; 6505ba55bcSShreyansh Jain char *ptr; 6605ba55bcSShreyansh Jain 6705ba55bcSShreyansh Jain printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n", 6805ba55bcSShreyansh Jain __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format, 6905ba55bcSShreyansh Jain fd->offset, fd->length20, fd->status); 7005ba55bcSShreyansh Jain 7105ba55bcSShreyansh Jain ptr = (char *)rte_dpaa_mem_ptov(fd->addr); 7205ba55bcSShreyansh Jain ptr += fd->offset; 7305ba55bcSShreyansh Jain printf("%02x ", *ptr); 7405ba55bcSShreyansh Jain for (ii = 1; ii < fd->length20; ii++) { 7505ba55bcSShreyansh Jain printf("%02x ", *ptr); 7605ba55bcSShreyansh Jain if ((ii % 16) == 0) 7705ba55bcSShreyansh Jain printf("\n"); 7805ba55bcSShreyansh Jain ptr++; 7905ba55bcSShreyansh Jain } 8005ba55bcSShreyansh Jain printf("\n"); 8105ba55bcSShreyansh Jain } 8205ba55bcSShreyansh Jain #else 8305ba55bcSShreyansh Jain #define dpaa_display_frame(a) 8405ba55bcSShreyansh Jain #endif 8505ba55bcSShreyansh Jain 86a7bdc3bdSShreyansh Jain static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused, 87a7bdc3bdSShreyansh Jain uint64_t prs __rte_unused) 88a7bdc3bdSShreyansh Jain { 89a7bdc3bdSShreyansh Jain DPAA_DP_LOG(DEBUG, "Slow parsing"); 90a7bdc3bdSShreyansh Jain /*TBD:XXX: to be implemented*/ 91a7bdc3bdSShreyansh Jain } 92a7bdc3bdSShreyansh Jain 930e5607e4SHemant Agrawal static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr) 94a7bdc3bdSShreyansh Jain { 95a7bdc3bdSShreyansh Jain struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr); 960e5607e4SHemant Agrawal uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK; 97a7bdc3bdSShreyansh Jain 98a7bdc3bdSShreyansh Jain DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot); 99a7bdc3bdSShreyansh Jain 100a7bdc3bdSShreyansh Jain switch (prs) { 101a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4: 102a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 103a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4; 104a7bdc3bdSShreyansh Jain break; 105a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6: 106a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 107a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6; 108a7bdc3bdSShreyansh Jain break; 1099ac71da4SNipun Gupta case DPAA_PKT_TYPE_ETHER: 1109ac71da4SNipun Gupta m->packet_type = RTE_PTYPE_L2_ETHER; 1119ac71da4SNipun Gupta break; 112a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG: 113a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG_UDP: 114a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG_TCP: 115a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_FRAG_SCTP: 116a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 117a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG; 118a7bdc3bdSShreyansh Jain break; 119a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG: 120a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG_UDP: 121a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG_TCP: 122a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_FRAG_SCTP: 123a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 124a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG; 125a7bdc3bdSShreyansh Jain break; 126a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_EXT: 127a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 128a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT; 129a7bdc3bdSShreyansh Jain break; 130a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_EXT: 131a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 132a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT; 133a7bdc3bdSShreyansh Jain break; 134a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_TCP: 135a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 136a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; 137a7bdc3bdSShreyansh Jain break; 138a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_TCP: 139a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 140a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; 141a7bdc3bdSShreyansh Jain break; 142a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_UDP: 143a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 144a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; 145a7bdc3bdSShreyansh Jain break; 146a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_UDP: 147a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 148a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; 149a7bdc3bdSShreyansh Jain break; 150a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_EXT_UDP: 151a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 152a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP; 153a7bdc3bdSShreyansh Jain break; 154a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_EXT_UDP: 155a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 156a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP; 157a7bdc3bdSShreyansh Jain break; 158a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_EXT_TCP: 159a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 160a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP; 161a7bdc3bdSShreyansh Jain break; 162a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_EXT_TCP: 163a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 164a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP; 165a7bdc3bdSShreyansh Jain break; 166a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV4_SCTP: 167a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 168a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; 169a7bdc3bdSShreyansh Jain break; 170a7bdc3bdSShreyansh Jain case DPAA_PKT_TYPE_IPV6_SCTP: 171a7bdc3bdSShreyansh Jain m->packet_type = RTE_PTYPE_L2_ETHER | 172a7bdc3bdSShreyansh Jain RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; 173a7bdc3bdSShreyansh Jain break; 1749ac71da4SNipun Gupta case DPAA_PKT_TYPE_NONE: 1759ac71da4SNipun Gupta m->packet_type = 0; 1769ac71da4SNipun Gupta break; 177a7bdc3bdSShreyansh Jain /* More switch cases can be added */ 178a7bdc3bdSShreyansh Jain default: 179a7bdc3bdSShreyansh Jain dpaa_slow_parsing(m, prs); 180a7bdc3bdSShreyansh Jain } 181a7bdc3bdSShreyansh Jain 182a7bdc3bdSShreyansh Jain m->tx_offload = annot->parse.ip_off[0]; 183a7bdc3bdSShreyansh Jain m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0]) 184a7bdc3bdSShreyansh Jain << DPAA_PKT_L3_LEN_SHIFT; 185a7bdc3bdSShreyansh Jain 186a7bdc3bdSShreyansh Jain /* Set the hash values */ 1879ac71da4SNipun Gupta m->hash.rss = (uint32_t)(annot->hash); 188a7bdc3bdSShreyansh Jain /* All packets with Bad checksum are dropped by interface (and 189a7bdc3bdSShreyansh Jain * corresponding notification issued to RX error queues). 190a7bdc3bdSShreyansh Jain */ 1919ac71da4SNipun Gupta m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD; 192a7bdc3bdSShreyansh Jain 193a7bdc3bdSShreyansh Jain /* Check if Vlan is present */ 194a7bdc3bdSShreyansh Jain if (prs & DPAA_PARSE_VLAN_MASK) 195380a7aabSOlivier Matz m->ol_flags |= PKT_RX_VLAN; 196a7bdc3bdSShreyansh Jain /* Packet received without stripping the vlan */ 197a7bdc3bdSShreyansh Jain } 198a7bdc3bdSShreyansh Jain 1995a8cf1beSShreyansh Jain static inline void dpaa_checksum(struct rte_mbuf *mbuf) 2005a8cf1beSShreyansh Jain { 2016d13ea8eSOlivier Matz struct rte_ether_hdr *eth_hdr = 2026d13ea8eSOlivier Matz rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *); 2035a8cf1beSShreyansh Jain char *l3_hdr = (char *)eth_hdr + mbuf->l2_len; 204a7c528e5SOlivier Matz struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 205a7c528e5SOlivier Matz struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 2065a8cf1beSShreyansh Jain 2075a8cf1beSShreyansh Jain DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf); 2085a8cf1beSShreyansh Jain 2095a8cf1beSShreyansh Jain if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || 2105a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2115a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT)) { 212a7c528e5SOlivier Matz ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 2135a8cf1beSShreyansh Jain ipv4_hdr->hdr_checksum = 0; 2145a8cf1beSShreyansh Jain ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); 2155a8cf1beSShreyansh Jain } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2165a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6) || 2175a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2185a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT)) 219a7c528e5SOlivier Matz ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 2205a8cf1beSShreyansh Jain 2215a8cf1beSShreyansh Jain if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { 222f41b5156SOlivier Matz struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr + 2235a8cf1beSShreyansh Jain mbuf->l3_len); 2245a8cf1beSShreyansh Jain tcp_hdr->cksum = 0; 2250c9da755SDavid Marchand if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) 2265a8cf1beSShreyansh Jain tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, 2275a8cf1beSShreyansh Jain tcp_hdr); 2280c9da755SDavid Marchand else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ 2295a8cf1beSShreyansh Jain tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, 2305a8cf1beSShreyansh Jain tcp_hdr); 2315a8cf1beSShreyansh Jain } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == 2325a8cf1beSShreyansh Jain RTE_PTYPE_L4_UDP) { 233e73e3547SOlivier Matz struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr + 2345a8cf1beSShreyansh Jain mbuf->l3_len); 2355a8cf1beSShreyansh Jain udp_hdr->dgram_cksum = 0; 2360c9da755SDavid Marchand if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4)) 2375a8cf1beSShreyansh Jain udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, 2385a8cf1beSShreyansh Jain udp_hdr); 2390c9da755SDavid Marchand else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ 2405a8cf1beSShreyansh Jain udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, 2415a8cf1beSShreyansh Jain udp_hdr); 2425a8cf1beSShreyansh Jain } 2435a8cf1beSShreyansh Jain } 2445a8cf1beSShreyansh Jain 2455a8cf1beSShreyansh Jain static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf, 2465a8cf1beSShreyansh Jain struct qm_fd *fd, char *prs_buf) 2475a8cf1beSShreyansh Jain { 2485a8cf1beSShreyansh Jain struct dpaa_eth_parse_results_t *prs; 2495a8cf1beSShreyansh Jain 2505a8cf1beSShreyansh Jain DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf); 2515a8cf1beSShreyansh Jain 2525a8cf1beSShreyansh Jain prs = GET_TX_PRS(prs_buf); 2535a8cf1beSShreyansh Jain prs->l3r = 0; 2545a8cf1beSShreyansh Jain prs->l4r = 0; 2555a8cf1beSShreyansh Jain if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || 2565a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2575a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT)) 2585a8cf1beSShreyansh Jain prs->l3r = DPAA_L3_PARSE_RESULT_IPV4; 2595a8cf1beSShreyansh Jain else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2605a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6) || 2615a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2625a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT)) 2635a8cf1beSShreyansh Jain prs->l3r = DPAA_L3_PARSE_RESULT_IPV6; 2645a8cf1beSShreyansh Jain 2655a8cf1beSShreyansh Jain if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) 2665a8cf1beSShreyansh Jain prs->l4r = DPAA_L4_PARSE_RESULT_TCP; 2675a8cf1beSShreyansh Jain else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) 2685a8cf1beSShreyansh Jain prs->l4r = DPAA_L4_PARSE_RESULT_UDP; 2695a8cf1beSShreyansh Jain 2705a8cf1beSShreyansh Jain prs->ip_off[0] = mbuf->l2_len; 2715a8cf1beSShreyansh Jain prs->l4_off = mbuf->l3_len + mbuf->l2_len; 2725a8cf1beSShreyansh Jain /* Enable L3 (and L4, if TCP or UDP) HW checksum*/ 2735a8cf1beSShreyansh Jain fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC; 2745a8cf1beSShreyansh Jain } 2755a8cf1beSShreyansh Jain 2765e0789e9SNipun Gupta static inline void 2775e0789e9SNipun Gupta dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr) 2785e0789e9SNipun Gupta { 2795e0789e9SNipun Gupta if (!mbuf->packet_type) { 2805e0789e9SNipun Gupta struct rte_net_hdr_lens hdr_lens; 2815e0789e9SNipun Gupta 2825e0789e9SNipun Gupta mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, 2835e0789e9SNipun Gupta RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK 2845e0789e9SNipun Gupta | RTE_PTYPE_L4_MASK); 2855e0789e9SNipun Gupta mbuf->l2_len = hdr_lens.l2_len; 2865e0789e9SNipun Gupta mbuf->l3_len = hdr_lens.l3_len; 2875e0789e9SNipun Gupta } 2885e0789e9SNipun Gupta if (mbuf->data_off < (DEFAULT_TX_ICEOF + 2895e0789e9SNipun Gupta sizeof(struct dpaa_eth_parse_results_t))) { 2905e0789e9SNipun Gupta DPAA_DP_LOG(DEBUG, "Checksum offload Err: " 2915e0789e9SNipun Gupta "Not enough Headroom " 2925e0789e9SNipun Gupta "space for correct Checksum offload." 2935e0789e9SNipun Gupta "So Calculating checksum in Software."); 2945e0789e9SNipun Gupta dpaa_checksum(mbuf); 2955e0789e9SNipun Gupta } else { 2965e0789e9SNipun Gupta dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr); 2975e0789e9SNipun Gupta } 2985e0789e9SNipun Gupta } 2995e0789e9SNipun Gupta 3008cffdcbeSShreyansh Jain struct rte_mbuf * 3019ac71da4SNipun Gupta dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid) 3028cffdcbeSShreyansh Jain { 3038cffdcbeSShreyansh Jain struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 3048cffdcbeSShreyansh Jain struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; 3058cffdcbeSShreyansh Jain struct qm_sg_entry *sgt, *sg_temp; 3068cffdcbeSShreyansh Jain void *vaddr, *sg_vaddr; 3078cffdcbeSShreyansh Jain int i = 0; 308287f4256SNipun Gupta uint16_t fd_offset = fd->offset; 3098cffdcbeSShreyansh Jain 31041c9ee8dSHemant Agrawal vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 3118cffdcbeSShreyansh Jain if (!vaddr) { 3128cffdcbeSShreyansh Jain DPAA_PMD_ERR("unable to convert physical address"); 3138cffdcbeSShreyansh Jain return NULL; 3148cffdcbeSShreyansh Jain } 3158cffdcbeSShreyansh Jain sgt = vaddr + fd_offset; 3168cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 3178cffdcbeSShreyansh Jain hw_sg_to_cpu(sg_temp); 3188cffdcbeSShreyansh Jain temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size); 31941c9ee8dSHemant Agrawal sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp)); 3208cffdcbeSShreyansh Jain 3218cffdcbeSShreyansh Jain first_seg = (struct rte_mbuf *)((char *)sg_vaddr - 3228cffdcbeSShreyansh Jain bp_info->meta_data_size); 3238cffdcbeSShreyansh Jain first_seg->data_off = sg_temp->offset; 3248cffdcbeSShreyansh Jain first_seg->data_len = sg_temp->length; 3258cffdcbeSShreyansh Jain first_seg->pkt_len = sg_temp->length; 3268cffdcbeSShreyansh Jain rte_mbuf_refcnt_set(first_seg, 1); 3278cffdcbeSShreyansh Jain 3288cffdcbeSShreyansh Jain first_seg->port = ifid; 3298cffdcbeSShreyansh Jain first_seg->nb_segs = 1; 3308cffdcbeSShreyansh Jain first_seg->ol_flags = 0; 3318cffdcbeSShreyansh Jain prev_seg = first_seg; 3328cffdcbeSShreyansh Jain while (i < DPAA_SGT_MAX_ENTRIES) { 3338cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 3348cffdcbeSShreyansh Jain hw_sg_to_cpu(sg_temp); 33541c9ee8dSHemant Agrawal sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 33641c9ee8dSHemant Agrawal qm_sg_entry_get64(sg_temp)); 3378cffdcbeSShreyansh Jain cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - 3388cffdcbeSShreyansh Jain bp_info->meta_data_size); 3398cffdcbeSShreyansh Jain cur_seg->data_off = sg_temp->offset; 3408cffdcbeSShreyansh Jain cur_seg->data_len = sg_temp->length; 3418cffdcbeSShreyansh Jain first_seg->pkt_len += sg_temp->length; 3428cffdcbeSShreyansh Jain first_seg->nb_segs += 1; 3438cffdcbeSShreyansh Jain rte_mbuf_refcnt_set(cur_seg, 1); 3448cffdcbeSShreyansh Jain prev_seg->next = cur_seg; 3458cffdcbeSShreyansh Jain if (sg_temp->final) { 3468cffdcbeSShreyansh Jain cur_seg->next = NULL; 3478cffdcbeSShreyansh Jain break; 3488cffdcbeSShreyansh Jain } 3498cffdcbeSShreyansh Jain prev_seg = cur_seg; 3508cffdcbeSShreyansh Jain } 35155576ac2SHemant Agrawal DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d", 35255576ac2SHemant Agrawal first_seg->pkt_len, first_seg->nb_segs); 3538cffdcbeSShreyansh Jain 3540e5607e4SHemant Agrawal dpaa_eth_packet_info(first_seg, vaddr); 3558cffdcbeSShreyansh Jain rte_pktmbuf_free_seg(temp); 3568cffdcbeSShreyansh Jain 3578cffdcbeSShreyansh Jain return first_seg; 3588cffdcbeSShreyansh Jain } 3598cffdcbeSShreyansh Jain 3609ac71da4SNipun Gupta static inline struct rte_mbuf * 3619ac71da4SNipun Gupta dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid) 36237f9b54bSShreyansh Jain { 36337f9b54bSShreyansh Jain struct rte_mbuf *mbuf; 3649ac71da4SNipun Gupta struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 36541c9ee8dSHemant Agrawal void *ptr; 3668cffdcbeSShreyansh Jain uint8_t format = 3678cffdcbeSShreyansh Jain (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; 3689ac71da4SNipun Gupta uint16_t offset; 3699ac71da4SNipun Gupta uint32_t length; 37037f9b54bSShreyansh Jain 3718cffdcbeSShreyansh Jain if (unlikely(format == qm_fd_sg)) 3728cffdcbeSShreyansh Jain return dpaa_eth_sg_to_mbuf(fd, ifid); 3738cffdcbeSShreyansh Jain 3749ac71da4SNipun Gupta offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT; 3759ac71da4SNipun Gupta length = fd->opaque & DPAA_FD_LENGTH_MASK; 3769ac71da4SNipun Gupta 37755576ac2SHemant Agrawal DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length); 37855576ac2SHemant Agrawal 37937f9b54bSShreyansh Jain /* Ignoring case when format != qm_fd_contig */ 38005ba55bcSShreyansh Jain dpaa_display_frame(fd); 3811ee09e39SHemant Agrawal ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 38237f9b54bSShreyansh Jain 38337f9b54bSShreyansh Jain mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 3841ee09e39SHemant Agrawal /* Prefetch the Parse results and packet data to L1 */ 3851ee09e39SHemant Agrawal rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 38637f9b54bSShreyansh Jain 38737f9b54bSShreyansh Jain mbuf->data_off = offset; 38837f9b54bSShreyansh Jain mbuf->data_len = length; 38937f9b54bSShreyansh Jain mbuf->pkt_len = length; 39037f9b54bSShreyansh Jain 39137f9b54bSShreyansh Jain mbuf->port = ifid; 39237f9b54bSShreyansh Jain mbuf->nb_segs = 1; 39337f9b54bSShreyansh Jain mbuf->ol_flags = 0; 39437f9b54bSShreyansh Jain mbuf->next = NULL; 39537f9b54bSShreyansh Jain rte_mbuf_refcnt_set(mbuf, 1); 3960e5607e4SHemant Agrawal dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 39737f9b54bSShreyansh Jain 39837f9b54bSShreyansh Jain return mbuf; 39937f9b54bSShreyansh Jain } 40037f9b54bSShreyansh Jain 4019124e65dSGagandeep Singh uint16_t 4029124e65dSGagandeep Singh dpaa_free_mbuf(const struct qm_fd *fd) 4039124e65dSGagandeep Singh { 4049124e65dSGagandeep Singh struct rte_mbuf *mbuf; 4059124e65dSGagandeep Singh struct dpaa_bp_info *bp_info; 4069124e65dSGagandeep Singh uint8_t format; 4079124e65dSGagandeep Singh void *ptr; 4089124e65dSGagandeep Singh 4099124e65dSGagandeep Singh bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 4109124e65dSGagandeep Singh format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; 4119124e65dSGagandeep Singh if (unlikely(format == qm_fd_sg)) { 4129124e65dSGagandeep Singh struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; 4139124e65dSGagandeep Singh struct qm_sg_entry *sgt, *sg_temp; 4149124e65dSGagandeep Singh void *vaddr, *sg_vaddr; 4159124e65dSGagandeep Singh int i = 0; 4169124e65dSGagandeep Singh uint16_t fd_offset = fd->offset; 4179124e65dSGagandeep Singh 4189124e65dSGagandeep Singh vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 4199124e65dSGagandeep Singh if (!vaddr) { 4209124e65dSGagandeep Singh DPAA_PMD_ERR("unable to convert physical address"); 4219124e65dSGagandeep Singh return -1; 4229124e65dSGagandeep Singh } 4239124e65dSGagandeep Singh sgt = vaddr + fd_offset; 4249124e65dSGagandeep Singh sg_temp = &sgt[i++]; 4259124e65dSGagandeep Singh hw_sg_to_cpu(sg_temp); 4269124e65dSGagandeep Singh temp = (struct rte_mbuf *) 4279124e65dSGagandeep Singh ((char *)vaddr - bp_info->meta_data_size); 4289124e65dSGagandeep Singh sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 4299124e65dSGagandeep Singh qm_sg_entry_get64(sg_temp)); 4309124e65dSGagandeep Singh 4319124e65dSGagandeep Singh first_seg = (struct rte_mbuf *)((char *)sg_vaddr - 4329124e65dSGagandeep Singh bp_info->meta_data_size); 4339124e65dSGagandeep Singh first_seg->nb_segs = 1; 4349124e65dSGagandeep Singh prev_seg = first_seg; 4359124e65dSGagandeep Singh while (i < DPAA_SGT_MAX_ENTRIES) { 4369124e65dSGagandeep Singh sg_temp = &sgt[i++]; 4379124e65dSGagandeep Singh hw_sg_to_cpu(sg_temp); 4389124e65dSGagandeep Singh sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, 4399124e65dSGagandeep Singh qm_sg_entry_get64(sg_temp)); 4409124e65dSGagandeep Singh cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - 4419124e65dSGagandeep Singh bp_info->meta_data_size); 4429124e65dSGagandeep Singh first_seg->nb_segs += 1; 4439124e65dSGagandeep Singh prev_seg->next = cur_seg; 4449124e65dSGagandeep Singh if (sg_temp->final) { 4459124e65dSGagandeep Singh cur_seg->next = NULL; 4469124e65dSGagandeep Singh break; 4479124e65dSGagandeep Singh } 4489124e65dSGagandeep Singh prev_seg = cur_seg; 4499124e65dSGagandeep Singh } 4509124e65dSGagandeep Singh 4519124e65dSGagandeep Singh rte_pktmbuf_free_seg(temp); 4529124e65dSGagandeep Singh rte_pktmbuf_free_seg(first_seg); 4539124e65dSGagandeep Singh return 0; 4549124e65dSGagandeep Singh } 4559124e65dSGagandeep Singh 4569124e65dSGagandeep Singh ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd)); 4579124e65dSGagandeep Singh mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 4589124e65dSGagandeep Singh 4599124e65dSGagandeep Singh rte_pktmbuf_free(mbuf); 4609124e65dSGagandeep Singh 4619124e65dSGagandeep Singh return 0; 4629124e65dSGagandeep Singh } 4639124e65dSGagandeep Singh 46419b4aba2SHemant Agrawal /* Specific for LS1043 */ 465b9083ea5SNipun Gupta void 46619b4aba2SHemant Agrawal dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, 467b9083ea5SNipun Gupta void **bufs, int num_bufs) 4680c504f69SHemant Agrawal { 469b9083ea5SNipun Gupta struct rte_mbuf *mbuf; 470b9083ea5SNipun Gupta struct dpaa_bp_info *bp_info; 471b9083ea5SNipun Gupta const struct qm_fd *fd; 472b9083ea5SNipun Gupta void *ptr; 473b9083ea5SNipun Gupta struct dpaa_if *dpaa_intf; 474b9083ea5SNipun Gupta uint16_t offset, i; 475b9083ea5SNipun Gupta uint32_t length; 476b9083ea5SNipun Gupta uint8_t format; 4770c504f69SHemant Agrawal 478b9083ea5SNipun Gupta bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid); 479b9083ea5SNipun Gupta ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd)); 480b9083ea5SNipun Gupta rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 48119b4aba2SHemant Agrawal bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 482b9083ea5SNipun Gupta 483b9083ea5SNipun Gupta for (i = 0; i < num_bufs; i++) { 48419b4aba2SHemant Agrawal if (i < num_bufs - 1) { 485b9083ea5SNipun Gupta bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid); 486b9083ea5SNipun Gupta ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd)); 487b9083ea5SNipun Gupta rte_prefetch0((void *)((uint8_t *)ptr + 488b9083ea5SNipun Gupta DEFAULT_RX_ICEOF)); 489b9083ea5SNipun Gupta bufs[i + 1] = (struct rte_mbuf *)((char *)ptr - 490b9083ea5SNipun Gupta bp_info->meta_data_size); 491b9083ea5SNipun Gupta } 492b9083ea5SNipun Gupta 493b9083ea5SNipun Gupta fd = &dqrr[i]->fd; 4949abdad12SHemant Agrawal dpaa_intf = fq[0]->dpaa_intf; 495b9083ea5SNipun Gupta 496b9083ea5SNipun Gupta format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> 497b9083ea5SNipun Gupta DPAA_FD_FORMAT_SHIFT; 498b9083ea5SNipun Gupta if (unlikely(format == qm_fd_sg)) { 499b9083ea5SNipun Gupta bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); 500b9083ea5SNipun Gupta continue; 501b9083ea5SNipun Gupta } 502b9083ea5SNipun Gupta 503b9083ea5SNipun Gupta offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> 504b9083ea5SNipun Gupta DPAA_FD_OFFSET_SHIFT; 505b9083ea5SNipun Gupta length = fd->opaque & DPAA_FD_LENGTH_MASK; 506b9083ea5SNipun Gupta 507b9083ea5SNipun Gupta mbuf = bufs[i]; 508b9083ea5SNipun Gupta mbuf->data_off = offset; 509b9083ea5SNipun Gupta mbuf->data_len = length; 510b9083ea5SNipun Gupta mbuf->pkt_len = length; 511b9083ea5SNipun Gupta mbuf->port = dpaa_intf->ifid; 512b9083ea5SNipun Gupta 513b9083ea5SNipun Gupta mbuf->nb_segs = 1; 514b9083ea5SNipun Gupta mbuf->ol_flags = 0; 515b9083ea5SNipun Gupta mbuf->next = NULL; 516b9083ea5SNipun Gupta rte_mbuf_refcnt_set(mbuf, 1); 5170e5607e4SHemant Agrawal dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 518b9083ea5SNipun Gupta } 519b9083ea5SNipun Gupta } 520b9083ea5SNipun Gupta 52119b4aba2SHemant Agrawal void 52219b4aba2SHemant Agrawal dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, 52319b4aba2SHemant Agrawal void **bufs, int num_bufs) 52419b4aba2SHemant Agrawal { 52519b4aba2SHemant Agrawal struct rte_mbuf *mbuf; 52619b4aba2SHemant Agrawal const struct qm_fd *fd; 52719b4aba2SHemant Agrawal struct dpaa_if *dpaa_intf; 52819b4aba2SHemant Agrawal uint16_t offset, i; 52919b4aba2SHemant Agrawal uint32_t length; 53019b4aba2SHemant Agrawal uint8_t format; 53119b4aba2SHemant Agrawal 53219b4aba2SHemant Agrawal for (i = 0; i < num_bufs; i++) { 53319b4aba2SHemant Agrawal fd = &dqrr[i]->fd; 53419b4aba2SHemant Agrawal dpaa_intf = fq[0]->dpaa_intf; 53519b4aba2SHemant Agrawal 53619b4aba2SHemant Agrawal format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> 53719b4aba2SHemant Agrawal DPAA_FD_FORMAT_SHIFT; 53819b4aba2SHemant Agrawal if (unlikely(format == qm_fd_sg)) { 53919b4aba2SHemant Agrawal bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid); 54019b4aba2SHemant Agrawal continue; 54119b4aba2SHemant Agrawal } 54219b4aba2SHemant Agrawal 54319b4aba2SHemant Agrawal offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> 54419b4aba2SHemant Agrawal DPAA_FD_OFFSET_SHIFT; 54519b4aba2SHemant Agrawal length = fd->opaque & DPAA_FD_LENGTH_MASK; 54619b4aba2SHemant Agrawal 54719b4aba2SHemant Agrawal mbuf = bufs[i]; 54819b4aba2SHemant Agrawal mbuf->data_off = offset; 54919b4aba2SHemant Agrawal mbuf->data_len = length; 55019b4aba2SHemant Agrawal mbuf->pkt_len = length; 55119b4aba2SHemant Agrawal mbuf->port = dpaa_intf->ifid; 55219b4aba2SHemant Agrawal 55319b4aba2SHemant Agrawal mbuf->nb_segs = 1; 55419b4aba2SHemant Agrawal mbuf->ol_flags = 0; 55519b4aba2SHemant Agrawal mbuf->next = NULL; 55619b4aba2SHemant Agrawal rte_mbuf_refcnt_set(mbuf, 1); 55719b4aba2SHemant Agrawal dpaa_eth_packet_info(mbuf, mbuf->buf_addr); 55819b4aba2SHemant Agrawal } 55919b4aba2SHemant Agrawal } 56019b4aba2SHemant Agrawal 561b9083ea5SNipun Gupta void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs) 562b9083ea5SNipun Gupta { 563b9083ea5SNipun Gupta struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid); 564b9083ea5SNipun Gupta void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd)); 565b9083ea5SNipun Gupta 566b9083ea5SNipun Gupta /* In case of LS1046, annotation stashing is disabled due to L2 cache 567b9083ea5SNipun Gupta * being bottleneck in case of multicore scanario for this platform. 568b9083ea5SNipun Gupta * So we prefetch the annoation beforehand, so that it is available 569b9083ea5SNipun Gupta * in cache when accessed. 570b9083ea5SNipun Gupta */ 571b9083ea5SNipun Gupta rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); 572b9083ea5SNipun Gupta 573b9083ea5SNipun Gupta *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size); 5740c504f69SHemant Agrawal } 5750c504f69SHemant Agrawal 5760c504f69SHemant Agrawal static uint16_t 5770c504f69SHemant Agrawal dpaa_eth_queue_portal_rx(struct qman_fq *fq, 5780c504f69SHemant Agrawal struct rte_mbuf **bufs, 5790c504f69SHemant Agrawal uint16_t nb_bufs) 5800c504f69SHemant Agrawal { 5810c504f69SHemant Agrawal int ret; 5820c504f69SHemant Agrawal 583b9c94167SNipun Gupta if (unlikely(!fq->qp_initialized)) { 5840c504f69SHemant Agrawal ret = rte_dpaa_portal_fq_init((void *)0, fq); 5850c504f69SHemant Agrawal if (ret) { 5860c504f69SHemant Agrawal DPAA_PMD_ERR("Failure in affining portal %d", ret); 5870c504f69SHemant Agrawal return 0; 5880c504f69SHemant Agrawal } 589b9c94167SNipun Gupta fq->qp_initialized = 1; 5900c504f69SHemant Agrawal } 5910c504f69SHemant Agrawal 5920c504f69SHemant Agrawal return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp); 5930c504f69SHemant Agrawal } 5940c504f69SHemant Agrawal 5955e745593SSunil Kumar Kori enum qman_cb_dqrr_result 5965e745593SSunil Kumar Kori dpaa_rx_cb_parallel(void *event, 5975e745593SSunil Kumar Kori struct qman_portal *qm __always_unused, 5985e745593SSunil Kumar Kori struct qman_fq *fq, 5995e745593SSunil Kumar Kori const struct qm_dqrr_entry *dqrr, 6005e745593SSunil Kumar Kori void **bufs) 6015e745593SSunil Kumar Kori { 6025e745593SSunil Kumar Kori u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 6035e745593SSunil Kumar Kori struct rte_mbuf *mbuf; 6045e745593SSunil Kumar Kori struct rte_event *ev = (struct rte_event *)event; 6055e745593SSunil Kumar Kori 6065e745593SSunil Kumar Kori mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); 6075e745593SSunil Kumar Kori ev->event_ptr = (void *)mbuf; 6085e745593SSunil Kumar Kori ev->flow_id = fq->ev.flow_id; 6095e745593SSunil Kumar Kori ev->sub_event_type = fq->ev.sub_event_type; 6105e745593SSunil Kumar Kori ev->event_type = RTE_EVENT_TYPE_ETHDEV; 6115e745593SSunil Kumar Kori ev->op = RTE_EVENT_OP_NEW; 6125e745593SSunil Kumar Kori ev->sched_type = fq->ev.sched_type; 6135e745593SSunil Kumar Kori ev->queue_id = fq->ev.queue_id; 6145e745593SSunil Kumar Kori ev->priority = fq->ev.priority; 6155e745593SSunil Kumar Kori ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN; 6165e745593SSunil Kumar Kori mbuf->seqn = DPAA_INVALID_MBUF_SEQN; 6175e745593SSunil Kumar Kori *bufs = mbuf; 6185e745593SSunil Kumar Kori 6195e745593SSunil Kumar Kori return qman_cb_dqrr_consume; 6205e745593SSunil Kumar Kori } 6215e745593SSunil Kumar Kori 6225e745593SSunil Kumar Kori enum qman_cb_dqrr_result 6235e745593SSunil Kumar Kori dpaa_rx_cb_atomic(void *event, 6245e745593SSunil Kumar Kori struct qman_portal *qm __always_unused, 6255e745593SSunil Kumar Kori struct qman_fq *fq, 6265e745593SSunil Kumar Kori const struct qm_dqrr_entry *dqrr, 6275e745593SSunil Kumar Kori void **bufs) 6285e745593SSunil Kumar Kori { 6295e745593SSunil Kumar Kori u8 index; 6305e745593SSunil Kumar Kori u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 6315e745593SSunil Kumar Kori struct rte_mbuf *mbuf; 6325e745593SSunil Kumar Kori struct rte_event *ev = (struct rte_event *)event; 6335e745593SSunil Kumar Kori 6345e745593SSunil Kumar Kori mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid); 6355e745593SSunil Kumar Kori ev->event_ptr = (void *)mbuf; 6365e745593SSunil Kumar Kori ev->flow_id = fq->ev.flow_id; 6375e745593SSunil Kumar Kori ev->sub_event_type = fq->ev.sub_event_type; 6385e745593SSunil Kumar Kori ev->event_type = RTE_EVENT_TYPE_ETHDEV; 6395e745593SSunil Kumar Kori ev->op = RTE_EVENT_OP_NEW; 6405e745593SSunil Kumar Kori ev->sched_type = fq->ev.sched_type; 6415e745593SSunil Kumar Kori ev->queue_id = fq->ev.queue_id; 6425e745593SSunil Kumar Kori ev->priority = fq->ev.priority; 6435e745593SSunil Kumar Kori 6445e745593SSunil Kumar Kori /* Save active dqrr entries */ 6455e745593SSunil Kumar Kori index = DQRR_PTR2IDX(dqrr); 6465e745593SSunil Kumar Kori DPAA_PER_LCORE_DQRR_SIZE++; 6475e745593SSunil Kumar Kori DPAA_PER_LCORE_DQRR_HELD |= 1 << index; 6485e745593SSunil Kumar Kori DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf; 6495e745593SSunil Kumar Kori ev->impl_opaque = index + 1; 6505e745593SSunil Kumar Kori mbuf->seqn = (uint32_t)index + 1; 6515e745593SSunil Kumar Kori *bufs = mbuf; 6525e745593SSunil Kumar Kori 6535e745593SSunil Kumar Kori return qman_cb_dqrr_defer; 6545e745593SSunil Kumar Kori } 6555e745593SSunil Kumar Kori 65637f9b54bSShreyansh Jain uint16_t dpaa_eth_queue_rx(void *q, 65737f9b54bSShreyansh Jain struct rte_mbuf **bufs, 65837f9b54bSShreyansh Jain uint16_t nb_bufs) 65937f9b54bSShreyansh Jain { 66037f9b54bSShreyansh Jain struct qman_fq *fq = q; 66137f9b54bSShreyansh Jain struct qm_dqrr_entry *dq; 66237f9b54bSShreyansh Jain uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid; 663f40d5a53SNipun Gupta int num_rx_bufs, ret; 664f40d5a53SNipun Gupta uint32_t vdqcr_flags = 0; 66537f9b54bSShreyansh Jain 666e1797f4bSAkhil Goyal if (unlikely(rte_dpaa_bpid_info == NULL && 667e1797f4bSAkhil Goyal rte_eal_process_type() == RTE_PROC_SECONDARY)) 668e1797f4bSAkhil Goyal rte_dpaa_bpid_info = fq->bp_array; 669e1797f4bSAkhil Goyal 6700c504f69SHemant Agrawal if (likely(fq->is_static)) 6710c504f69SHemant Agrawal return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs); 6720c504f69SHemant Agrawal 673*e5872221SRohit Raj if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 67437f9b54bSShreyansh Jain ret = rte_dpaa_portal_init((void *)0); 67537f9b54bSShreyansh Jain if (ret) { 67637f9b54bSShreyansh Jain DPAA_PMD_ERR("Failure in affining portal"); 67737f9b54bSShreyansh Jain return 0; 67837f9b54bSShreyansh Jain } 6795d944582SNipun Gupta } 68037f9b54bSShreyansh Jain 681f40d5a53SNipun Gupta /* Until request for four buffers, we provide exact number of buffers. 682f40d5a53SNipun Gupta * Otherwise we do not set the QM_VDQCR_EXACT flag. 683f40d5a53SNipun Gupta * Not setting QM_VDQCR_EXACT flag can provide two more buffers than 684f40d5a53SNipun Gupta * requested, so we request two less in this case. 685f40d5a53SNipun Gupta */ 686f40d5a53SNipun Gupta if (nb_bufs < 4) { 687f40d5a53SNipun Gupta vdqcr_flags = QM_VDQCR_EXACT; 688f40d5a53SNipun Gupta num_rx_bufs = nb_bufs; 689f40d5a53SNipun Gupta } else { 690f40d5a53SNipun Gupta num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ? 691f40d5a53SNipun Gupta (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2); 692f40d5a53SNipun Gupta } 693f40d5a53SNipun Gupta ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); 69437f9b54bSShreyansh Jain if (ret) 69537f9b54bSShreyansh Jain return 0; 69637f9b54bSShreyansh Jain 69737f9b54bSShreyansh Jain do { 69837f9b54bSShreyansh Jain dq = qman_dequeue(fq); 69937f9b54bSShreyansh Jain if (!dq) 70037f9b54bSShreyansh Jain continue; 70137f9b54bSShreyansh Jain bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid); 70237f9b54bSShreyansh Jain qman_dqrr_consume(fq, dq); 70337f9b54bSShreyansh Jain } while (fq->flags & QMAN_FQ_STATE_VDQCR); 70437f9b54bSShreyansh Jain 70537f9b54bSShreyansh Jain return num_rx; 70637f9b54bSShreyansh Jain } 70737f9b54bSShreyansh Jain 7088cffdcbeSShreyansh Jain int 7098cffdcbeSShreyansh Jain dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, 7108cffdcbeSShreyansh Jain struct qm_fd *fd, 7118cffdcbeSShreyansh Jain uint32_t bpid) 7128cffdcbeSShreyansh Jain { 7138cffdcbeSShreyansh Jain struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL; 7148cffdcbeSShreyansh Jain struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid); 7158cffdcbeSShreyansh Jain struct rte_mbuf *temp, *mi; 7168cffdcbeSShreyansh Jain struct qm_sg_entry *sg_temp, *sgt; 7178cffdcbeSShreyansh Jain int i = 0; 7188cffdcbeSShreyansh Jain 7198cffdcbeSShreyansh Jain DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit"); 7208cffdcbeSShreyansh Jain 7218cffdcbeSShreyansh Jain temp = rte_pktmbuf_alloc(bp_info->mp); 7228cffdcbeSShreyansh Jain if (!temp) { 7238cffdcbeSShreyansh Jain DPAA_PMD_ERR("Failure in allocation of mbuf"); 7248cffdcbeSShreyansh Jain return -1; 7258cffdcbeSShreyansh Jain } 7268cffdcbeSShreyansh Jain if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry)) 7278cffdcbeSShreyansh Jain + temp->data_off)) { 7288cffdcbeSShreyansh Jain DPAA_PMD_ERR("Insufficient space in mbuf for SG entries"); 7298cffdcbeSShreyansh Jain return -1; 7308cffdcbeSShreyansh Jain } 7318cffdcbeSShreyansh Jain 7328cffdcbeSShreyansh Jain fd->cmd = 0; 7338cffdcbeSShreyansh Jain fd->opaque_addr = 0; 7348cffdcbeSShreyansh Jain 7358cffdcbeSShreyansh Jain if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) { 736d565c887SAshish Jain if (!mbuf->packet_type) { 737d565c887SAshish Jain struct rte_net_hdr_lens hdr_lens; 738d565c887SAshish Jain 739d565c887SAshish Jain mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens, 740d565c887SAshish Jain RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK 741d565c887SAshish Jain | RTE_PTYPE_L4_MASK); 742d565c887SAshish Jain mbuf->l2_len = hdr_lens.l2_len; 743d565c887SAshish Jain mbuf->l3_len = hdr_lens.l3_len; 744d565c887SAshish Jain } 7458cffdcbeSShreyansh Jain if (temp->data_off < DEFAULT_TX_ICEOF 7468cffdcbeSShreyansh Jain + sizeof(struct dpaa_eth_parse_results_t)) 7478cffdcbeSShreyansh Jain temp->data_off = DEFAULT_TX_ICEOF 7488cffdcbeSShreyansh Jain + sizeof(struct dpaa_eth_parse_results_t); 7498cffdcbeSShreyansh Jain dcbz_64(temp->buf_addr); 7508cffdcbeSShreyansh Jain dpaa_checksum_offload(mbuf, fd, temp->buf_addr); 7518cffdcbeSShreyansh Jain } 7528cffdcbeSShreyansh Jain 7538cffdcbeSShreyansh Jain sgt = temp->buf_addr + temp->data_off; 7548cffdcbeSShreyansh Jain fd->format = QM_FD_SG; 755455da545SSantosh Shukla fd->addr = temp->buf_iova; 7568cffdcbeSShreyansh Jain fd->offset = temp->data_off; 7578cffdcbeSShreyansh Jain fd->bpid = bpid; 7588cffdcbeSShreyansh Jain fd->length20 = mbuf->pkt_len; 7598cffdcbeSShreyansh Jain 7608cffdcbeSShreyansh Jain while (i < DPAA_SGT_MAX_ENTRIES) { 7618cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 7628cffdcbeSShreyansh Jain sg_temp->opaque = 0; 7638cffdcbeSShreyansh Jain sg_temp->val = 0; 764455da545SSantosh Shukla sg_temp->addr = cur_seg->buf_iova; 7658cffdcbeSShreyansh Jain sg_temp->offset = cur_seg->data_off; 7668cffdcbeSShreyansh Jain sg_temp->length = cur_seg->data_len; 7678cffdcbeSShreyansh Jain if (RTE_MBUF_DIRECT(cur_seg)) { 7688cffdcbeSShreyansh Jain if (rte_mbuf_refcnt_read(cur_seg) > 1) { 7698cffdcbeSShreyansh Jain /*If refcnt > 1, invalid bpid is set to ensure 7708cffdcbeSShreyansh Jain * buffer is not freed by HW. 7718cffdcbeSShreyansh Jain */ 7728cffdcbeSShreyansh Jain sg_temp->bpid = 0xff; 7738cffdcbeSShreyansh Jain rte_mbuf_refcnt_update(cur_seg, -1); 7748cffdcbeSShreyansh Jain } else { 7758cffdcbeSShreyansh Jain sg_temp->bpid = 7768cffdcbeSShreyansh Jain DPAA_MEMPOOL_TO_BPID(cur_seg->pool); 7778cffdcbeSShreyansh Jain } 7788cffdcbeSShreyansh Jain cur_seg = cur_seg->next; 7798cffdcbeSShreyansh Jain } else { 7808cffdcbeSShreyansh Jain /* Get owner MBUF from indirect buffer */ 7818cffdcbeSShreyansh Jain mi = rte_mbuf_from_indirect(cur_seg); 7828cffdcbeSShreyansh Jain if (rte_mbuf_refcnt_read(mi) > 1) { 7838cffdcbeSShreyansh Jain /*If refcnt > 1, invalid bpid is set to ensure 7848cffdcbeSShreyansh Jain * owner buffer is not freed by HW. 7858cffdcbeSShreyansh Jain */ 7868cffdcbeSShreyansh Jain sg_temp->bpid = 0xff; 7878cffdcbeSShreyansh Jain } else { 7888cffdcbeSShreyansh Jain sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool); 7898cffdcbeSShreyansh Jain rte_mbuf_refcnt_update(mi, 1); 7908cffdcbeSShreyansh Jain } 7918cffdcbeSShreyansh Jain prev_seg = cur_seg; 7928cffdcbeSShreyansh Jain cur_seg = cur_seg->next; 7938cffdcbeSShreyansh Jain prev_seg->next = NULL; 7948cffdcbeSShreyansh Jain rte_pktmbuf_free(prev_seg); 7958cffdcbeSShreyansh Jain } 7968cffdcbeSShreyansh Jain if (cur_seg == NULL) { 7978cffdcbeSShreyansh Jain sg_temp->final = 1; 7988cffdcbeSShreyansh Jain cpu_to_hw_sg(sg_temp); 7998cffdcbeSShreyansh Jain break; 8008cffdcbeSShreyansh Jain } 8018cffdcbeSShreyansh Jain cpu_to_hw_sg(sg_temp); 8028cffdcbeSShreyansh Jain } 8038cffdcbeSShreyansh Jain return 0; 8048cffdcbeSShreyansh Jain } 8058cffdcbeSShreyansh Jain 80637f9b54bSShreyansh Jain /* Handle mbufs which are not segmented (non SG) */ 80737f9b54bSShreyansh Jain static inline void 80837f9b54bSShreyansh Jain tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, 80937f9b54bSShreyansh Jain struct dpaa_bp_info *bp_info, 81037f9b54bSShreyansh Jain struct qm_fd *fd_arr) 81137f9b54bSShreyansh Jain { 81237f9b54bSShreyansh Jain struct rte_mbuf *mi = NULL; 81337f9b54bSShreyansh Jain 81437f9b54bSShreyansh Jain if (RTE_MBUF_DIRECT(mbuf)) { 81537f9b54bSShreyansh Jain if (rte_mbuf_refcnt_read(mbuf) > 1) { 81637f9b54bSShreyansh Jain /* In case of direct mbuf and mbuf being cloned, 81737f9b54bSShreyansh Jain * BMAN should _not_ release buffer. 81837f9b54bSShreyansh Jain */ 81937f9b54bSShreyansh Jain DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); 82037f9b54bSShreyansh Jain /* Buffer should be releasd by EAL */ 82137f9b54bSShreyansh Jain rte_mbuf_refcnt_update(mbuf, -1); 82237f9b54bSShreyansh Jain } else { 82337f9b54bSShreyansh Jain /* In case of direct mbuf and no cloning, mbuf can be 82437f9b54bSShreyansh Jain * released by BMAN. 82537f9b54bSShreyansh Jain */ 82637f9b54bSShreyansh Jain DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid); 82737f9b54bSShreyansh Jain } 82837f9b54bSShreyansh Jain } else { 82937f9b54bSShreyansh Jain /* This is data-containing core mbuf: 'mi' */ 83037f9b54bSShreyansh Jain mi = rte_mbuf_from_indirect(mbuf); 83137f9b54bSShreyansh Jain if (rte_mbuf_refcnt_read(mi) > 1) { 83237f9b54bSShreyansh Jain /* In case of indirect mbuf, and mbuf being cloned, 83337f9b54bSShreyansh Jain * BMAN should _not_ release it and let EAL release 83437f9b54bSShreyansh Jain * it through pktmbuf_free below. 83537f9b54bSShreyansh Jain */ 83637f9b54bSShreyansh Jain DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff); 83737f9b54bSShreyansh Jain } else { 83837f9b54bSShreyansh Jain /* In case of indirect mbuf, and no cloning, core mbuf 83937f9b54bSShreyansh Jain * should be released by BMAN. 84037f9b54bSShreyansh Jain * Increate refcnt of core mbuf so that when 84137f9b54bSShreyansh Jain * pktmbuf_free is called and mbuf is released, EAL 84237f9b54bSShreyansh Jain * doesn't try to release core mbuf which would have 84337f9b54bSShreyansh Jain * been released by BMAN. 84437f9b54bSShreyansh Jain */ 84537f9b54bSShreyansh Jain rte_mbuf_refcnt_update(mi, 1); 84637f9b54bSShreyansh Jain DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid); 84737f9b54bSShreyansh Jain } 84837f9b54bSShreyansh Jain rte_pktmbuf_free(mbuf); 84937f9b54bSShreyansh Jain } 8505a8cf1beSShreyansh Jain 8515e0789e9SNipun Gupta if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) 8525e0789e9SNipun Gupta dpaa_unsegmented_checksum(mbuf, fd_arr); 85337f9b54bSShreyansh Jain } 85437f9b54bSShreyansh Jain 85537f9b54bSShreyansh Jain /* Handle all mbufs on dpaa BMAN managed pool */ 85637f9b54bSShreyansh Jain static inline uint16_t 85737f9b54bSShreyansh Jain tx_on_dpaa_pool(struct rte_mbuf *mbuf, 85837f9b54bSShreyansh Jain struct dpaa_bp_info *bp_info, 85937f9b54bSShreyansh Jain struct qm_fd *fd_arr) 86037f9b54bSShreyansh Jain { 86137f9b54bSShreyansh Jain DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf); 86237f9b54bSShreyansh Jain 86337f9b54bSShreyansh Jain if (mbuf->nb_segs == 1) { 86437f9b54bSShreyansh Jain /* Case for non-segmented buffers */ 86537f9b54bSShreyansh Jain tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr); 8668cffdcbeSShreyansh Jain } else if (mbuf->nb_segs > 1 && 8678cffdcbeSShreyansh Jain mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) { 8688cffdcbeSShreyansh Jain if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) { 8698cffdcbeSShreyansh Jain DPAA_PMD_DEBUG("Unable to create Scatter Gather FD"); 8708cffdcbeSShreyansh Jain return 1; 8718cffdcbeSShreyansh Jain } 87237f9b54bSShreyansh Jain } else { 87337f9b54bSShreyansh Jain DPAA_PMD_DEBUG("Number of Segments not supported"); 87437f9b54bSShreyansh Jain return 1; 87537f9b54bSShreyansh Jain } 87637f9b54bSShreyansh Jain 87737f9b54bSShreyansh Jain return 0; 87837f9b54bSShreyansh Jain } 87937f9b54bSShreyansh Jain 88037f9b54bSShreyansh Jain /* Handle all mbufs on an external pool (non-dpaa) */ 881f8c7a17aSNipun Gupta static inline struct rte_mbuf * 882f8c7a17aSNipun Gupta reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf) 88337f9b54bSShreyansh Jain { 88437f9b54bSShreyansh Jain struct dpaa_if *dpaa_intf = txq->dpaa_intf; 885f8c7a17aSNipun Gupta struct dpaa_bp_info *bp_info = dpaa_intf->bp_info; 886f8c7a17aSNipun Gupta struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0}; 887f8c7a17aSNipun Gupta struct rte_mbuf *temp_mbuf; 888f8c7a17aSNipun Gupta int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0; 889f8c7a17aSNipun Gupta uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0; 890f8c7a17aSNipun Gupta char *data; 89137f9b54bSShreyansh Jain 892f8c7a17aSNipun Gupta DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer"); 893f8c7a17aSNipun Gupta 894f8c7a17aSNipun Gupta mbufs_size = bp_info->size - 895f8c7a17aSNipun Gupta bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM; 896f8c7a17aSNipun Gupta extra_seg = !!(mbuf->pkt_len % mbufs_size); 897f8c7a17aSNipun Gupta num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg; 898f8c7a17aSNipun Gupta 899f8c7a17aSNipun Gupta ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs); 900f8c7a17aSNipun Gupta if (ret != 0) { 901f8c7a17aSNipun Gupta DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed"); 902f8c7a17aSNipun Gupta return NULL; 90337f9b54bSShreyansh Jain } 90437f9b54bSShreyansh Jain 905f8c7a17aSNipun Gupta temp_mbuf = mbuf; 90637f9b54bSShreyansh Jain 907f8c7a17aSNipun Gupta while (temp_mbuf) { 908f8c7a17aSNipun Gupta /* If mbuf data is less than new mbuf remaining memory */ 909f8c7a17aSNipun Gupta if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) { 910f8c7a17aSNipun Gupta bytes_to_copy = temp_mbuf->data_len - offset1; 911f8c7a17aSNipun Gupta mbuf_greater = -1; 912f8c7a17aSNipun Gupta /* If mbuf data is greater than new mbuf remaining memory */ 913f8c7a17aSNipun Gupta } else if ((temp_mbuf->data_len - offset1) > 914f8c7a17aSNipun Gupta (mbufs_size - offset2)) { 915f8c7a17aSNipun Gupta bytes_to_copy = mbufs_size - offset2; 916f8c7a17aSNipun Gupta mbuf_greater = 1; 917f8c7a17aSNipun Gupta /* if mbuf data is equal to new mbuf remaining memory */ 918f8c7a17aSNipun Gupta } else { 919f8c7a17aSNipun Gupta bytes_to_copy = temp_mbuf->data_len - offset1; 920f8c7a17aSNipun Gupta mbuf_greater = 0; 921f8c7a17aSNipun Gupta } 922f8c7a17aSNipun Gupta 923f8c7a17aSNipun Gupta /* Copy the data */ 924f8c7a17aSNipun Gupta data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy); 925f8c7a17aSNipun Gupta 926f8c7a17aSNipun Gupta rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(mbuf, 927f8c7a17aSNipun Gupta void *, offset1), bytes_to_copy); 928f8c7a17aSNipun Gupta 929f8c7a17aSNipun Gupta /* Set new offsets and the temp buffers */ 930f8c7a17aSNipun Gupta if (mbuf_greater == -1) { 931f8c7a17aSNipun Gupta offset1 = 0; 932f8c7a17aSNipun Gupta offset2 += bytes_to_copy; 933f8c7a17aSNipun Gupta temp_mbuf = temp_mbuf->next; 934f8c7a17aSNipun Gupta } else if (mbuf_greater == 1) { 935f8c7a17aSNipun Gupta offset2 = 0; 936f8c7a17aSNipun Gupta offset1 += bytes_to_copy; 937f8c7a17aSNipun Gupta new_mbufs[i]->next = new_mbufs[i + 1]; 938f8c7a17aSNipun Gupta new_mbufs[0]->nb_segs++; 939f8c7a17aSNipun Gupta i++; 940f8c7a17aSNipun Gupta } else { 941f8c7a17aSNipun Gupta offset1 = 0; 942f8c7a17aSNipun Gupta offset2 = 0; 943f8c7a17aSNipun Gupta temp_mbuf = temp_mbuf->next; 944f8c7a17aSNipun Gupta new_mbufs[i]->next = new_mbufs[i + 1]; 945f8c7a17aSNipun Gupta if (new_mbufs[i + 1]) 946f8c7a17aSNipun Gupta new_mbufs[0]->nb_segs++; 947f8c7a17aSNipun Gupta i++; 948f8c7a17aSNipun Gupta } 949f8c7a17aSNipun Gupta } 950f8c7a17aSNipun Gupta 951f8c7a17aSNipun Gupta /* Copy other required fields */ 952f8c7a17aSNipun Gupta new_mbufs[0]->ol_flags = mbuf->ol_flags; 953f8c7a17aSNipun Gupta new_mbufs[0]->packet_type = mbuf->packet_type; 954f8c7a17aSNipun Gupta new_mbufs[0]->tx_offload = mbuf->tx_offload; 955f8c7a17aSNipun Gupta 956f8c7a17aSNipun Gupta rte_pktmbuf_free(mbuf); 957f8c7a17aSNipun Gupta 958f8c7a17aSNipun Gupta return new_mbufs[0]; 95937f9b54bSShreyansh Jain } 96037f9b54bSShreyansh Jain 96137f9b54bSShreyansh Jain uint16_t 96237f9b54bSShreyansh Jain dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) 96337f9b54bSShreyansh Jain { 96437f9b54bSShreyansh Jain struct rte_mbuf *mbuf, *mi = NULL; 96537f9b54bSShreyansh Jain struct rte_mempool *mp; 96637f9b54bSShreyansh Jain struct dpaa_bp_info *bp_info; 967b0a87fe2SNipun Gupta struct qm_fd fd_arr[DPAA_TX_BURST_SIZE]; 9685e0789e9SNipun Gupta uint32_t frames_to_send, loop, sent = 0; 96937f9b54bSShreyansh Jain uint16_t state; 970f8c7a17aSNipun Gupta int ret, realloc_mbuf = 0; 9715e745593SSunil Kumar Kori uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0}; 97237f9b54bSShreyansh Jain 973*e5872221SRohit Raj if (unlikely(!DPAA_PER_LCORE_PORTAL)) { 97437f9b54bSShreyansh Jain ret = rte_dpaa_portal_init((void *)0); 97537f9b54bSShreyansh Jain if (ret) { 97637f9b54bSShreyansh Jain DPAA_PMD_ERR("Failure in affining portal"); 97737f9b54bSShreyansh Jain return 0; 97837f9b54bSShreyansh Jain } 9795d944582SNipun Gupta } 98037f9b54bSShreyansh Jain 98137f9b54bSShreyansh Jain DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q); 98237f9b54bSShreyansh Jain 98337f9b54bSShreyansh Jain while (nb_bufs) { 984b0a87fe2SNipun Gupta frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ? 985b0a87fe2SNipun Gupta DPAA_TX_BURST_SIZE : nb_bufs; 9865e0789e9SNipun Gupta for (loop = 0; loop < frames_to_send; loop++) { 9875e0789e9SNipun Gupta mbuf = *(bufs++); 988f8c7a17aSNipun Gupta /* In case the data offset is not multiple of 16, 989f8c7a17aSNipun Gupta * FMAN can stall because of an errata. So reallocate 990f8c7a17aSNipun Gupta * the buffer in such case. 991f8c7a17aSNipun Gupta */ 992f8c7a17aSNipun Gupta if (dpaa_svr_family == SVR_LS1043A_FAMILY && 99359267d7bSNipun Gupta (mbuf->data_off & 0x7F) != 0x0) 994f8c7a17aSNipun Gupta realloc_mbuf = 1; 9959afce5aaSSunil Kumar Kori seqn = mbuf->seqn; 9969afce5aaSSunil Kumar Kori if (seqn != DPAA_INVALID_MBUF_SEQN) { 9979afce5aaSSunil Kumar Kori index = seqn - 1; 9989afce5aaSSunil Kumar Kori if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) { 9999afce5aaSSunil Kumar Kori flags[loop] = 10009afce5aaSSunil Kumar Kori ((index & QM_EQCR_DCA_IDXMASK) << 8); 10019afce5aaSSunil Kumar Kori flags[loop] |= QMAN_ENQUEUE_FLAG_DCA; 10029afce5aaSSunil Kumar Kori DPAA_PER_LCORE_DQRR_SIZE--; 10039afce5aaSSunil Kumar Kori DPAA_PER_LCORE_DQRR_HELD &= 10049afce5aaSSunil Kumar Kori ~(1 << index); 10059afce5aaSSunil Kumar Kori } 10069afce5aaSSunil Kumar Kori } 10079afce5aaSSunil Kumar Kori 10085e0789e9SNipun Gupta if (likely(RTE_MBUF_DIRECT(mbuf))) { 100937f9b54bSShreyansh Jain mp = mbuf->pool; 10105e0789e9SNipun Gupta bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 10115e0789e9SNipun Gupta if (likely(mp->ops_index == 10125e0789e9SNipun Gupta bp_info->dpaa_ops_index && 10135e0789e9SNipun Gupta mbuf->nb_segs == 1 && 1014f8c7a17aSNipun Gupta realloc_mbuf == 0 && 10155e0789e9SNipun Gupta rte_mbuf_refcnt_read(mbuf) == 1)) { 10165e0789e9SNipun Gupta DPAA_MBUF_TO_CONTIG_FD(mbuf, 10175e0789e9SNipun Gupta &fd_arr[loop], bp_info->bpid); 10185e0789e9SNipun Gupta if (mbuf->ol_flags & 10195e0789e9SNipun Gupta DPAA_TX_CKSUM_OFFLOAD_MASK) 10205e0789e9SNipun Gupta dpaa_unsegmented_checksum(mbuf, 10215e0789e9SNipun Gupta &fd_arr[loop]); 10225e0789e9SNipun Gupta continue; 10235e0789e9SNipun Gupta } 102437f9b54bSShreyansh Jain } else { 102537f9b54bSShreyansh Jain mi = rte_mbuf_from_indirect(mbuf); 102637f9b54bSShreyansh Jain mp = mi->pool; 102737f9b54bSShreyansh Jain } 102837f9b54bSShreyansh Jain 102937f9b54bSShreyansh Jain bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); 1030f8c7a17aSNipun Gupta if (unlikely(mp->ops_index != bp_info->dpaa_ops_index || 1031f8c7a17aSNipun Gupta realloc_mbuf == 1)) { 1032f8c7a17aSNipun Gupta struct rte_mbuf *temp_mbuf; 1033f8c7a17aSNipun Gupta 1034f8c7a17aSNipun Gupta temp_mbuf = reallocate_mbuf(q, mbuf); 1035f8c7a17aSNipun Gupta if (!temp_mbuf) { 1036f8c7a17aSNipun Gupta /* Set frames_to_send & nb_bufs so 1037f8c7a17aSNipun Gupta * that packets are transmitted till 1038f8c7a17aSNipun Gupta * previous frame. 1039f8c7a17aSNipun Gupta */ 1040f8c7a17aSNipun Gupta frames_to_send = loop; 1041f8c7a17aSNipun Gupta nb_bufs = loop; 1042f8c7a17aSNipun Gupta goto send_pkts; 1043f8c7a17aSNipun Gupta } 1044f8c7a17aSNipun Gupta mbuf = temp_mbuf; 1045f8c7a17aSNipun Gupta realloc_mbuf = 0; 1046f8c7a17aSNipun Gupta } 1047f8c7a17aSNipun Gupta 104837f9b54bSShreyansh Jain state = tx_on_dpaa_pool(mbuf, bp_info, 104937f9b54bSShreyansh Jain &fd_arr[loop]); 105037f9b54bSShreyansh Jain if (unlikely(state)) { 105137f9b54bSShreyansh Jain /* Set frames_to_send & nb_bufs so 105237f9b54bSShreyansh Jain * that packets are transmitted till 105337f9b54bSShreyansh Jain * previous frame. 105437f9b54bSShreyansh Jain */ 105537f9b54bSShreyansh Jain frames_to_send = loop; 105637f9b54bSShreyansh Jain nb_bufs = loop; 105737f9b54bSShreyansh Jain goto send_pkts; 105837f9b54bSShreyansh Jain } 105937f9b54bSShreyansh Jain } 106037f9b54bSShreyansh Jain 106137f9b54bSShreyansh Jain send_pkts: 106237f9b54bSShreyansh Jain loop = 0; 106337f9b54bSShreyansh Jain while (loop < frames_to_send) { 106437f9b54bSShreyansh Jain loop += qman_enqueue_multi(q, &fd_arr[loop], 10655e745593SSunil Kumar Kori &flags[loop], 106637f9b54bSShreyansh Jain frames_to_send - loop); 106737f9b54bSShreyansh Jain } 106837f9b54bSShreyansh Jain nb_bufs -= frames_to_send; 10695e0789e9SNipun Gupta sent += frames_to_send; 107037f9b54bSShreyansh Jain } 107137f9b54bSShreyansh Jain 10725e0789e9SNipun Gupta DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q); 107337f9b54bSShreyansh Jain 10745e0789e9SNipun Gupta return sent; 107537f9b54bSShreyansh Jain } 107637f9b54bSShreyansh Jain 10779124e65dSGagandeep Singh uint16_t 10789124e65dSGagandeep Singh dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) 10799124e65dSGagandeep Singh { 10809124e65dSGagandeep Singh qman_ern_poll_free(); 10819124e65dSGagandeep Singh 10829124e65dSGagandeep Singh return dpaa_eth_queue_tx(q, bufs, nb_bufs); 10839124e65dSGagandeep Singh } 10849124e65dSGagandeep Singh 108537f9b54bSShreyansh Jain uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused, 108637f9b54bSShreyansh Jain struct rte_mbuf **bufs __rte_unused, 108737f9b54bSShreyansh Jain uint16_t nb_bufs __rte_unused) 108837f9b54bSShreyansh Jain { 108937f9b54bSShreyansh Jain DPAA_DP_LOG(DEBUG, "Drop all packets"); 109037f9b54bSShreyansh Jain 109137f9b54bSShreyansh Jain /* Drop all incoming packets. No need to free packets here 109237f9b54bSShreyansh Jain * because the rte_eth f/w frees up the packets through tx_buffer 109337f9b54bSShreyansh Jain * callback in case this functions returns count less than nb_bufs 109437f9b54bSShreyansh Jain */ 109537f9b54bSShreyansh Jain return 0; 109637f9b54bSShreyansh Jain } 1097