/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */

/* System headers */
#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
#include <limits.h>
#include <sched.h>
#include <pthread.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <of.h>
#include <netcfg.h>

#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
	do { \
		(_fd)->cmd = 0; \
		(_fd)->opaque_addr = 0; \
		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
		(_fd)->opaque |= (_mbuf)->pkt_len; \
		(_fd)->addr = (_mbuf)->buf_iova; \
		(_fd)->bpid = _bpid; \
	} while (0)

#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
void dpaa_display_frame(const struct qm_fd *fd)
{
	int ii;
	char *ptr;

	printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
	       __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
	       fd->offset, fd->length20, fd->status);

	ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
	ptr += fd->offset;
	/* Dump the frame payload, 16 bytes per row */
	for (ii = 0; ii < fd->length20; ii++) {
		printf("%02x ", *ptr);
		ptr++;
		if (((ii + 1) % 16) == 0)
			printf("\n");
	}
	printf("\n");
}
#else
#define dpaa_display_frame(a)
#endif

static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
				     uint64_t prs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Slow parsing");
	/*TBD:XXX: to be implemented*/
}

static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
					uint64_t fd_virt_addr)
{
	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
	uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;

	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

	switch (prs) {
	case DPAA_PKT_TYPE_NONE:
		m->packet_type = 0;
		break;
	case DPAA_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA_PKT_TYPE_IPV4_FRAG:
	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV6_FRAG:
	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	/* More switch cases can be added */
	default:
		dpaa_slow_parsing(m, prs);
	}

	m->tx_offload = annot->parse.ip_off[0];
	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
		<< DPAA_PKT_L3_LEN_SHIFT;

	/* Set the hash values */
	m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
	m->ol_flags = PKT_RX_RSS_HASH;
	/* All packets with Bad checksum are dropped by interface (and
	 * corresponding notification issued to RX error queues).
	 */
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	/* Check if Vlan is present */
	if (prs & DPAA_PARSE_VLAN_MASK)
		m->ol_flags |= PKT_RX_VLAN;
	/* Packet received without stripping the vlan */
}

static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

	DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT)) {
		ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6_EXT))
		ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
					  mbuf->l3_len);
		tcp_hdr->cksum = 0;
		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
							       tcp_hdr);
		else /* assume ethertype == ETHER_TYPE_IPv6 */
			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
							       tcp_hdr);
	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
		   RTE_PTYPE_L4_UDP) {
		struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
					  mbuf->l3_len);
		udp_hdr->dgram_cksum = 0;
		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
								     udp_hdr);
		else /* assume ethertype == ETHER_TYPE_IPv6 */
			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
								     udp_hdr);
	}
}

static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
					 struct qm_fd *fd, char *prs_buf)
{
	struct dpaa_eth_parse_results_t *prs;

	DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
%p", mbuf); 2495a8cf1beSShreyansh Jain 2505a8cf1beSShreyansh Jain prs = GET_TX_PRS(prs_buf); 2515a8cf1beSShreyansh Jain prs->l3r = 0; 2525a8cf1beSShreyansh Jain prs->l4r = 0; 2535a8cf1beSShreyansh Jain if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) || 2545a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2555a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV4_EXT)) 2565a8cf1beSShreyansh Jain prs->l3r = DPAA_L3_PARSE_RESULT_IPV4; 2575a8cf1beSShreyansh Jain else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2585a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6) || 2595a8cf1beSShreyansh Jain ((mbuf->packet_type & RTE_PTYPE_L3_MASK) == 2605a8cf1beSShreyansh Jain RTE_PTYPE_L3_IPV6_EXT)) 2615a8cf1beSShreyansh Jain prs->l3r = DPAA_L3_PARSE_RESULT_IPV6; 2625a8cf1beSShreyansh Jain 2635a8cf1beSShreyansh Jain if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) 2645a8cf1beSShreyansh Jain prs->l4r = DPAA_L4_PARSE_RESULT_TCP; 2655a8cf1beSShreyansh Jain else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) 2665a8cf1beSShreyansh Jain prs->l4r = DPAA_L4_PARSE_RESULT_UDP; 2675a8cf1beSShreyansh Jain 2685a8cf1beSShreyansh Jain prs->ip_off[0] = mbuf->l2_len; 2695a8cf1beSShreyansh Jain prs->l4_off = mbuf->l3_len + mbuf->l2_len; 2705a8cf1beSShreyansh Jain /* Enable L3 (and L4, if TCP or UDP) HW checksum*/ 2715a8cf1beSShreyansh Jain fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC; 2725a8cf1beSShreyansh Jain } 2735a8cf1beSShreyansh Jain 2748cffdcbeSShreyansh Jain struct rte_mbuf * 2758cffdcbeSShreyansh Jain dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid) 2768cffdcbeSShreyansh Jain { 2778cffdcbeSShreyansh Jain struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); 2788cffdcbeSShreyansh Jain struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; 2798cffdcbeSShreyansh Jain struct qm_sg_entry *sgt, *sg_temp; 2808cffdcbeSShreyansh Jain void *vaddr, *sg_vaddr; 2818cffdcbeSShreyansh Jain int i = 0; 2828cffdcbeSShreyansh Jain uint8_t fd_offset = fd->offset; 2838cffdcbeSShreyansh Jain 2848cffdcbeSShreyansh Jain DPAA_DP_LOG(DEBUG, "Received an SG frame"); 2858cffdcbeSShreyansh Jain 2868cffdcbeSShreyansh Jain vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd)); 2878cffdcbeSShreyansh Jain if (!vaddr) { 2888cffdcbeSShreyansh Jain DPAA_PMD_ERR("unable to convert physical address"); 2898cffdcbeSShreyansh Jain return NULL; 2908cffdcbeSShreyansh Jain } 2918cffdcbeSShreyansh Jain sgt = vaddr + fd_offset; 2928cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 2938cffdcbeSShreyansh Jain hw_sg_to_cpu(sg_temp); 2948cffdcbeSShreyansh Jain temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size); 2958cffdcbeSShreyansh Jain sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp)); 2968cffdcbeSShreyansh Jain 2978cffdcbeSShreyansh Jain first_seg = (struct rte_mbuf *)((char *)sg_vaddr - 2988cffdcbeSShreyansh Jain bp_info->meta_data_size); 2998cffdcbeSShreyansh Jain first_seg->data_off = sg_temp->offset; 3008cffdcbeSShreyansh Jain first_seg->data_len = sg_temp->length; 3018cffdcbeSShreyansh Jain first_seg->pkt_len = sg_temp->length; 3028cffdcbeSShreyansh Jain rte_mbuf_refcnt_set(first_seg, 1); 3038cffdcbeSShreyansh Jain 3048cffdcbeSShreyansh Jain first_seg->port = ifid; 3058cffdcbeSShreyansh Jain first_seg->nb_segs = 1; 3068cffdcbeSShreyansh Jain first_seg->ol_flags = 0; 3078cffdcbeSShreyansh Jain prev_seg = first_seg; 3088cffdcbeSShreyansh Jain while (i < DPAA_SGT_MAX_ENTRIES) { 3098cffdcbeSShreyansh Jain sg_temp = &sgt[i++]; 3108cffdcbeSShreyansh Jain hw_sg_to_cpu(sg_temp); 
		sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
		cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						      bp_info->meta_data_size);
		cur_seg->data_off = sg_temp->offset;
		cur_seg->data_len = sg_temp->length;
		first_seg->pkt_len += sg_temp->length;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(cur_seg, 1);
		prev_seg->next = cur_seg;
		if (sg_temp->final) {
			cur_seg->next = NULL;
			break;
		}
		prev_seg = cur_seg;
	}

	dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
	rte_pktmbuf_free_seg(temp);

	return first_seg;
}

static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
						   uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *mbuf;
	void *ptr;
	uint8_t format =
		(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	uint16_t offset =
		(fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
	uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;

	DPAA_DP_LOG(DEBUG, " FD--->MBUF");

	if (unlikely(format == qm_fd_sg))
		return dpaa_eth_sg_to_mbuf(fd, ifid);

	/* Ignoring case when format != qm_fd_contig */
	dpaa_display_frame(fd);
	ptr = rte_dpaa_mem_ptov(fd->addr);
	/* Ignoring case when ptr would be NULL. That is only possible
	 * in case of a corrupted packet.
	 */

	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
	/* Prefetch the Parse results and packet data to L1 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
	rte_prefetch0((void *)((uint8_t *)ptr + offset));

	mbuf->data_off = offset;
	mbuf->data_len = length;
	mbuf->pkt_len = length;

	mbuf->port = ifid;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
	dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);

	return mbuf;
}

/* RX burst handler: issue a volatile dequeue on the Rx frame queue and
 * convert each dequeued frame descriptor into an mbuf.
 */
uint16_t dpaa_eth_queue_rx(void *q,
			   struct rte_mbuf **bufs,
			   uint16_t nb_bufs)
{
	struct qman_fq *fq = q;
	struct qm_dqrr_entry *dq;
	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	int ret;

	ret = rte_dpaa_portal_init((void *)0);
	if (ret) {
		DPAA_PMD_ERR("Failure in affining portal");
		return 0;
	}

	ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
			       DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
	if (ret)
		return 0;

	do {
		dq = qman_dequeue(fq);
		if (!dq)
			continue;
		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return num_rx;
}

static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
{
	int ret;
	uint64_t buf = 0;
	struct bm_buffer bufs;

	ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
	if (ret <= 0) {
		DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
		return (void *)buf;
	}

	DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
		    (uint64_t)bufs.addr, bufs.bpid);

	buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
	if (!buf)
		goto out;

out:
	return (void *)buf;
}

static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
					     struct dpaa_if *dpaa_intf)
{
	struct rte_mbuf *dpaa_mbuf;

	/* allocate pktbuffer on bpid for dpaa port */
	dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
	if (!dpaa_mbuf)
		return NULL;

	memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
		((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);

	/* Copy only the required fields */
	dpaa_mbuf->data_off = mbuf->data_off;
	dpaa_mbuf->pkt_len = mbuf->pkt_len;
	dpaa_mbuf->ol_flags = mbuf->ol_flags;
	dpaa_mbuf->packet_type = mbuf->packet_type;
	dpaa_mbuf->tx_offload = mbuf->tx_offload;
	rte_pktmbuf_free(mbuf);
	return dpaa_mbuf;
}

int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		struct qm_fd *fd,
		uint32_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
	struct rte_mbuf *temp, *mi;
	struct qm_sg_entry *sg_temp, *sgt;
	int i = 0;

	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

	temp = rte_pktmbuf_alloc(bp_info->mp);
	if (!temp) {
		DPAA_PMD_ERR("Failure in allocation of mbuf");
		return -1;
	}
	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
				+ temp->data_off)) {
		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
		rte_pktmbuf_free(temp);
		return -1;
	}

	fd->cmd = 0;
	fd->opaque_addr = 0;

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
		if (temp->data_off < DEFAULT_TX_ICEOF
			+ sizeof(struct dpaa_eth_parse_results_t))
			temp->data_off = DEFAULT_TX_ICEOF
				+ sizeof(struct dpaa_eth_parse_results_t);
		dcbz_64(temp->buf_addr);
		dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
	}

	sgt = temp->buf_addr + temp->data_off;
	fd->format = QM_FD_SG;
	fd->addr = temp->buf_iova;
	fd->offset = temp->data_off;
	fd->bpid = bpid;
	fd->length20 = mbuf->pkt_len;

	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		sg_temp->opaque = 0;
		sg_temp->val = 0;
		sg_temp->addr = cur_seg->buf_iova;
		sg_temp->offset = cur_seg->data_off;
		sg_temp->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				sg_temp->bpid =
					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
			} else {
				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
		if (cur_seg == NULL) {
			sg_temp->final = 1;
			cpu_to_hw_sg(sg_temp);
			break;
		}
		cpu_to_hw_sg(sg_temp);
	}
	return 0;
}

/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
			    struct dpaa_bp_info *bp_info,
			    struct qm_fd *fd_arr)
{
	struct rte_mbuf *mi = NULL;

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* In case of direct mbuf and mbuf being cloned,
			 * BMAN should _not_ release buffer.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
			/* Buffer should be released by EAL */
			rte_mbuf_refcnt_update(mbuf, -1);
		} else {
			/* In case of direct mbuf and no cloning, mbuf can be
			 * released by BMAN.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
	} else {
		/* This is data-containing core mbuf: 'mi' */
		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1) {
			/* In case of indirect mbuf, and mbuf being cloned,
			 * BMAN should _not_ release it and let EAL release
			 * it through pktmbuf_free below.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
		} else {
			/* In case of indirect mbuf, and no cloning, core mbuf
			 * should be released by BMAN.
			 * Increase refcnt of core mbuf so that when
			 * pktmbuf_free is called and mbuf is released, EAL
			 * doesn't try to release core mbuf which would have
			 * been released by BMAN.
			 */
			rte_mbuf_refcnt_update(mi, 1);
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
		rte_pktmbuf_free(mbuf);
	}

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
		if (mbuf->data_off < (DEFAULT_TX_ICEOF +
				      sizeof(struct dpaa_eth_parse_results_t))) {
			DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
				    "Not enough headroom for checksum offload; "
				    "calculating checksum in software.");
			dpaa_checksum(mbuf);
		} else {
			dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
		}
	}
}

/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
		struct dpaa_bp_info *bp_info,
		struct qm_fd *fd_arr)
{
	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

	if (mbuf->nb_segs == 1) {
		/* Case for non-segmented buffers */
		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
	} else if (mbuf->nb_segs > 1 &&
		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
			return 1;
		}
	} else {
		DPAA_PMD_DEBUG("Number of Segments not supported");
		return 1;
	}

	return 0;
}

/* Handle all mbufs on an external pool (non-dpaa) */
static inline uint16_t
tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
		    struct qm_fd *fd_arr)
{
	struct dpaa_if *dpaa_intf = txq->dpaa_intf;
	struct rte_mbuf *dmable_mbuf;

	DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer. "
		    "Allocating an offloaded buffer");
	dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
	if (!dmable_mbuf) {
		DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
		return 1;
	}

	DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);

	return 0;
}

/* TX burst handler: translate mbufs into frame descriptors and enqueue
 * them on the Tx frame queue in bursts of up to MAX_TX_RING_SLOTS.
 */
uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct rte_mbuf *mbuf, *mi = NULL;
	struct rte_mempool *mp;
	struct dpaa_bp_info *bp_info;
	struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, loop, i = 0;
	uint16_t state;
	int ret;

	ret = rte_dpaa_portal_init((void *)0);
	if (ret) {
		DPAA_PMD_ERR("Failure in affining portal");
		return 0;
	}

	DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

	while (nb_bufs) {
		frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
		for (loop = 0; loop < frames_to_send; loop++, i++) {
			mbuf = bufs[i];
			if (RTE_MBUF_DIRECT(mbuf)) {
				mp = mbuf->pool;
			} else {
				mi = rte_mbuf_from_indirect(mbuf);
				mp = mi->pool;
			}

			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
			if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
				state = tx_on_dpaa_pool(mbuf, bp_info,
							&fd_arr[loop]);
				if (unlikely(state)) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
			} else {
				state = tx_on_external_pool(q, mbuf,
							    &fd_arr[loop]);
				if (unlikely(state)) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
			}
		}

send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi(q, &fd_arr[loop],
						   frames_to_send - loop);
		}
		nb_bufs -= frames_to_send;
	}

	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);

	return i;
}

uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
			      struct rte_mbuf **bufs __rte_unused,
			      uint16_t nb_bufs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Drop all packets");

	/* Drop all incoming packets. No need to free packets here
	 * because the rte_eth f/w frees up the packets through tx_buffer
	 * callback in case this function returns a count less than nb_bufs.
	 */
	return 0;
}