/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2024 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <dev_driver.h>
#include <rte_hexdump.h>

#include <bus_fslmc_driver.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
		struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;

static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
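
/* Note on DPAA2_MBUF_TO_CONTIG_FD (illustrative, not part of the driver):
 * it populates a single-buffer (contiguous) frame descriptor directly from
 * an mbuf, roughly:
 *
 *	struct qbman_fd fd;
 *	DPAA2_MBUF_TO_CONTIG_FD(mbuf, &fd, mempool_to_bpid(mbuf->pool));
 *
 * FRC/CTRL/FLC are cleared so that stale state from a previous use of the
 * descriptor is not handed to hardware.
 */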

static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;

	if (dpaa2_enable_ts[m->port]) {
		*dpaa2_timestamp_dynfield(m) = annotation->word2;
		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(m));
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
			"ol_flags =0x%" PRIx64 "",
			frc, m->packet_type, m->ol_flags);
}
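
/* Slow-path parser, used when the fast paths cannot classify the frame.
 * Judging from the bit tests below, word3 carries the L2 parse status,
 * word4 the L3/L4 status, word5 the VLAN TCI offsets and word8 the frame
 * annotation (checksum) status.
 */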

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
		struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
		mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
	}
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
			     L2_MPLS_N_PRESENT))
		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
			L3_PROTO_ESP_PRESENT))
			pkt_type |= RTE_PTYPE_TUNNEL_ESP;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		  L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
			L3_PROTO_ESP_PRESENT))
			pkt_type |= RTE_PTYPE_TUNNEL_ESP;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
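
/* Fast-path parser: checksum validity is taken from the FAS bits in word8,
 * and the common L2/L3/L4 combinations are matched against word4 as a
 * whole. Any extra L2 state in word3 (VLAN, MPLS, ARP, ...) forces the
 * slow path above.
 */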
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (dpaa2_enable_ts[mbuf->port]) {
		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(mbuf));
	}

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
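
/* Convert a scatter/gather FD back into an mbuf chain. Every SG entry
 * points at a buffer carved from a DPAA2-backed mempool, so the mbuf
 * header is recovered in place from the buffer address
 * (DPAA2_INLINE_MBUF_FROM_BUF) with no allocation or copy.
 */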
static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	void *hw_annot_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
	else
		first_seg->packet_type =
			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

	rte_mbuf_refcnt_set(first_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
			(void **)&first_seg, 1, 1);
#endif
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
				(void **)&next_seg, 1, 1);
#endif
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
			(void **)&temp, 1, 1);
#endif
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
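
/* Convert a contiguous (single-buffer) FD back into an mbuf. As in the
 * S/G case, the mbuf header lives in the same buffer as the frame data,
 * at a fixed offset given by the pool's meta_data_size, so this is an
 * in-place, zero-copy recovery.
 */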
static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
			(void **)&mbuf, 1, 1);
#endif

	/* Parse the packet */
	/* Parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, parse results are placed after
	 * the private SW annotation area.
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
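
/* Build a scatter/gather TX FD from a (possibly multi-segment) mbuf.
 * If the first segment is direct and has enough headroom, the SG table is
 * written into that headroom ("inline SGT"); otherwise a separate buffer
 * is taken from dpaa2_tx_sg_pool to hold it. Segments with refcnt > 1 or
 * with external buffers are marked with an invalid bpid (IVP) so hardware
 * does not free them; such segments are returned to software via the
 * free_buf[] array instead.
 */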
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd,
		  struct sw_buf_free *free_buf,
		  uint32_t *free_count,
		  uint32_t pkt_id,
		  uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i, offset = 0;

#ifdef RTE_LIBRTE_IEEE1588
	/* annotation area for timestamp in first buffer */
	offset = 0x64;
#endif
	if (RTE_MBUF_DIRECT(mbuf) &&
	    (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
	    + offset))) {
		temp = mbuf;
		if (rte_mbuf_refcnt_read(temp) > 1) {
			/* If refcnt > 1, invalid bpid is set to ensure
			 * buffer is not freed by HW
			 */
			fd->simple.bpid_offset = 0;
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(temp, -1);
		} else {
			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
			rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
					(void **)&temp, 1, 0);
#endif
		}
		DPAA2_SET_FD_OFFSET(fd, offset);
	} else {
		temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool);
		if (temp == NULL) {
			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table");
			return -ENOMEM;
		}
		DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool));
		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
				(void **)&temp, 1, 0);
#endif
	}
	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	DPAA2_RESET_FD_FLC(fd);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(cur_seg));
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			/* if we are using inline SGT in same buffers
			 * set the FLE FMT as Frame Data Section
			 */
			if (temp == cur_seg) {
				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
				DPAA2_SET_FLE_IVP(sge);
			} else {
				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
					/* If refcnt > 1, invalid bpid is set
					 * to ensure buffer is not freed by HW
					 */
					DPAA2_SET_FLE_IVP(sge);
					rte_mbuf_refcnt_update(cur_seg, -1);
				} else {
					DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
					rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
							(void **)&cur_seg, 1, 0);
#endif
				}
			}
		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
			free_buf[*free_count].seg = cur_seg;
			free_buf[*free_count].pkt_id = pkt_id;
			++*free_count;
			DPAA2_SET_FLE_IVP(sge);
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			free_buf[*free_count].seg = cur_seg;
			free_buf[*free_count].pkt_id = pkt_id;
			++*free_count;
		}
		cur_seg = cur_seg->next;
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd,
	       struct sw_buf_free *buf_to_free,
	       uint32_t *free_count,
	       uint32_t pkt_id,
	       uint16_t bpid) __rte_unused;

static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd,
	       struct sw_buf_free *buf_to_free,
	       uint32_t *free_count,
	       uint32_t pkt_id,
	       uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		else
			rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
					(void **)&mbuf, 1, 0);
#endif
	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
		DPAA2_SET_FD_IVP(fd);
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);

		buf_to_free[*free_count].seg = mbuf;
		buf_to_free[*free_count].pkt_id = pkt_id;
		++*free_count;
	}
}
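
/* Copying TX fallback for mbufs that hardware cannot free directly (e.g.
 * not from a DPAA2-backed pool): allocate a buffer from the pool behind
 * the given bpid, copy the frame data and the relevant metadata into it,
 * and build the FD over the copy.
 */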
static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
		(void **)&m, 1, 0);
#endif
	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static void
dump_err_pkts(struct dpaa2_queue *dpaa2_q)
{
	/* Function receives frames for a given device and VQ */
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	uint32_t lcore_id = rte_lcore_id();
	void *v_addr, *hw_annot_addr;
	struct dpaa2_fas *fas;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Check if the previous issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;

	pending = 1;
	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status &
				QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);
		v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
		hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
		fas = hw_annot_addr;

		DPAA2_PMD_ERR("[%d] error packet on port[%d]:"
			" fd_off: %d, fd_err: %x, fas_status: %x",
			rte_lcore_id(), eth_data->port_id,
			DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
			fas->status);
		rte_hexdump(stderr, "Error packet", v_addr,
			DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));

		dq_storage++;
		num_rx++;
	} while (pending);

	dpaa2_q->err_pkts += num_rx;
}

/* This function assumes that caller will keep the same value for nb_pkts
 * across calls per queue, if that is not the case, better use non-prefetch
 * version of rx call.
 * It will return the packets as requested in previous call without honoring
 * the current nb_pkts or bufs space.
 */
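
/* The prefetch variant keeps two per-lcore DQ storage rings and ping-pongs
 * between them: while frames from the previously issued volatile dequeue
 * are processed out of one ring, the next pull command is already
 * outstanding against the other, which is intended to hide most of the
 * QBMAN pull latency (hence the fixed-nb_pkts contract described above).
 */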
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;

	q_storage = dpaa2_q->q_storage[rte_lcore_id()];

	if (unlikely(dpaa2_enable_err_queue))
		dump_err_pkts(priv->rx_err_vq);

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		if (dpaa2_svr_family != SVR_LX2160A) {
			const struct qbman_fd *next_fd =
				qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
		}
#endif

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
			priv->rx_timestamp =
				*dpaa2_timestamp_dynfield(bufs[num_rx]);
		}
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   " QBMAN is busy (2)");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}

void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	qbman_swp_dqrr_consume(swp, dq);
}

void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	dqrr_index = qbman_get_dqrr_idx(dq);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
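
/* Ordered-queue variant: besides the usual event fields, the ORP (order
 * restoration point) id and the sequence number from the dequeue entry
 * are packed into the mbuf's dpaa2_seqn dynfield, so a later enqueue can
 * restore the original frame order.
 */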
void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}

uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;

	if (unlikely(dpaa2_enable_err_queue))
		dump_err_pkts(priv->rx_err_vq);

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
				dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command is not issued. QBMAN is busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			if (dpaa2_svr_family != SVR_LX2160A) {
				const struct qbman_fd *next_fd =
					qbman_result_DQ_fd(dq_storage + 1);

				/* Prefetch Annotation address for the parse
				 * results.
				 */
				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
					DPAA2_GET_FD_ADDR(next_fd) +
					DPAA2_FD_PTA_SIZE + 16)));
			}
#endif

			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							eth_data->port_id);
			else
				bufs[num_rx] = eth_fd_to_mbuf(fd,
							eth_data->port_id);

#if defined(RTE_LIBRTE_IEEE1588)
			if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
				priv->rx_timestamp =
					*dpaa2_timestamp_dynfield(bufs[num_rx]);
			}
#endif

			if (eth_data->dev_conf.rxmode.offloads &
					RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
				rte_vlan_strip(bufs[num_rx]);
			}

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
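
/* Drain the TX confirmation queue: each confirmation FD identifies a
 * transmitted buffer, which is released back to its pool through
 * qbman_swp_release(). With RTE_LIBRTE_IEEE1588, the TX timestamp is also
 * read back from the confirmed buffer's annotation area.
 */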
Yang dq_storage = dpaa2_q->q_storage[0]->dq_storage[0]; 11219ceacab7SPriyanka Jain qbman_pull_desc_clear(&pulldesc); 11229ceacab7SPriyanka Jain qbman_pull_desc_set_fq(&pulldesc, fqid); 11239ceacab7SPriyanka Jain qbman_pull_desc_set_storage(&pulldesc, dq_storage, 11249ceacab7SPriyanka Jain (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); 11259ceacab7SPriyanka Jain 11269ceacab7SPriyanka Jain qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size); 11279ceacab7SPriyanka Jain 11289ceacab7SPriyanka Jain while (1) { 11299ceacab7SPriyanka Jain if (qbman_swp_pull(swp, &pulldesc)) { 11309ceacab7SPriyanka Jain DPAA2_PMD_DP_DEBUG("VDQ command is not issued. " 11312b843cacSDavid Marchand "QBMAN is busy"); 11329ceacab7SPriyanka Jain /* Portal was busy, try again */ 11339ceacab7SPriyanka Jain continue; 11349ceacab7SPriyanka Jain } 11359ceacab7SPriyanka Jain break; 11369ceacab7SPriyanka Jain } 11379ceacab7SPriyanka Jain 11389ceacab7SPriyanka Jain rte_prefetch0((void *)((size_t)(dq_storage + 1))); 11399ceacab7SPriyanka Jain /* Check if the previously issued command is completed. */ 11409ceacab7SPriyanka Jain while (!qbman_check_command_complete(dq_storage)) 11419ceacab7SPriyanka Jain ; 11429ceacab7SPriyanka Jain 11439ceacab7SPriyanka Jain num_pulled = 0; 11449ceacab7SPriyanka Jain pending = 1; 11459ceacab7SPriyanka Jain do { 11469ceacab7SPriyanka Jain /* Loop until the dq_storage is updated with 11479ceacab7SPriyanka Jain * a new token by QBMAN 11489ceacab7SPriyanka Jain */ 11499ceacab7SPriyanka Jain while (!qbman_check_new_result(dq_storage)) 11509ceacab7SPriyanka Jain ; 11519ceacab7SPriyanka Jain rte_prefetch0((void *)((size_t)(dq_storage + 2))); 11529ceacab7SPriyanka Jain /* Check whether the last pull command has expired and 11539ceacab7SPriyanka Jain * set the condition for loop termination 11549ceacab7SPriyanka Jain */ 11559ceacab7SPriyanka Jain if (qbman_result_DQ_is_pull_complete(dq_storage)) { 11569ceacab7SPriyanka Jain pending = 0; 11579ceacab7SPriyanka Jain /* Check for valid frame.
*/ 11589ceacab7SPriyanka Jain status = qbman_result_DQ_flags(dq_storage); 11599ceacab7SPriyanka Jain if (unlikely((status & 11609ceacab7SPriyanka Jain QBMAN_DQ_STAT_VALIDFRAME) == 0)) 11619ceacab7SPriyanka Jain continue; 11629ceacab7SPriyanka Jain } 11639ceacab7SPriyanka Jain fd = qbman_result_DQ_fd(dq_storage); 11649ceacab7SPriyanka Jain 11659ceacab7SPriyanka Jain next_fd = qbman_result_DQ_fd(dq_storage + 1); 11669ceacab7SPriyanka Jain /* Prefetch Annotation address for the parse results */ 11679ceacab7SPriyanka Jain rte_prefetch0((void *)(size_t) 11689ceacab7SPriyanka Jain (DPAA2_GET_FD_ADDR(next_fd) + 11699ceacab7SPriyanka Jain DPAA2_FD_PTA_SIZE + 16)); 11709ceacab7SPriyanka Jain 11719ceacab7SPriyanka Jain bpid = DPAA2_GET_FD_BPID(fd); 11729ceacab7SPriyanka Jain 11739ceacab7SPriyanka Jain /* Create a release descriptor required for releasing 11749ceacab7SPriyanka Jain * buffers into QBMAN 11759ceacab7SPriyanka Jain */ 11769ceacab7SPriyanka Jain qbman_release_desc_clear(&releasedesc); 11779ceacab7SPriyanka Jain qbman_release_desc_set_bpid(&releasedesc, bpid); 11789ceacab7SPriyanka Jain 11799ceacab7SPriyanka Jain buf = DPAA2_GET_FD_ADDR(fd); 11809ceacab7SPriyanka Jain /* feed them to bman */ 11819ceacab7SPriyanka Jain do { 11829ceacab7SPriyanka Jain ret = qbman_swp_release(swp, &releasedesc, 11839ceacab7SPriyanka Jain &buf, 1); 11849ceacab7SPriyanka Jain } while (ret == -EBUSY); 11859ceacab7SPriyanka Jain 11869ceacab7SPriyanka Jain dq_storage++; 11879ceacab7SPriyanka Jain num_tx_conf++; 11889ceacab7SPriyanka Jain num_pulled++; 1189e806bf87SPriyanka Jain #if defined(RTE_LIBRTE_IEEE1588) 119090762e5cSVanshika Shukla v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); 119190762e5cSVanshika Shukla mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr, 119290762e5cSVanshika Shukla rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); 119390762e5cSVanshika Shukla 119402548404SGagandeep Singh if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) { 1195e806bf87SPriyanka Jain annotation = (struct dpaa2_annot_hdr *)((size_t) 1196e806bf87SPriyanka Jain DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + 1197e806bf87SPriyanka Jain DPAA2_FD_PTA_SIZE); 1198e806bf87SPriyanka Jain priv->tx_timestamp = annotation->word2; 119990762e5cSVanshika Shukla } 1200e806bf87SPriyanka Jain #endif 12019ceacab7SPriyanka Jain } while (pending); 12029ceacab7SPriyanka Jain 12039ceacab7SPriyanka Jain /* Last VDQ provided all packets and more packets are requested */ 12049ceacab7SPriyanka Jain } while (num_pulled == dpaa2_dqrr_size); 12059ceacab7SPriyanka Jain 12069ceacab7SPriyanka Jain dpaa2_q->rx_pkts += num_tx_conf; 12079ceacab7SPriyanka Jain 12089ceacab7SPriyanka Jain return num_tx_conf; 12099ceacab7SPriyanka Jain } 12109ceacab7SPriyanka Jain 1211e806bf87SPriyanka Jain /* Configure the egress frame annotation for timestamp update */ 1212e806bf87SPriyanka Jain static void enable_tx_tstamp(struct qbman_fd *fd) 1213e806bf87SPriyanka Jain { 1214e806bf87SPriyanka Jain struct dpaa2_faead *fd_faead; 1215e806bf87SPriyanka Jain 1216e806bf87SPriyanka Jain /* Set frame annotation status field as valid */ 1217e806bf87SPriyanka Jain (fd)->simple.frc |= DPAA2_FD_FRC_FASV; 1218e806bf87SPriyanka Jain 1219e806bf87SPriyanka Jain /* Set frame annotation egress action descriptor as valid */ 1220e806bf87SPriyanka Jain (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV; 1221e806bf87SPriyanka Jain 1222e806bf87SPriyanka Jain /* Set Annotation Length as 128B */ 1223e806bf87SPriyanka Jain (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL; 1224e806bf87SPriyanka Jain 
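/*
 * Note: the FAEAD (frame annotation egress action descriptor) written
 * below lives in the packet's hardware annotation area, located
 * DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET bytes past the
 * buffer start; the FD carries an IOVA, so it is translated back to a
 * virtual address before the write. With the update bits set, hardware
 * stores the transmit timestamp in this annotation, which
 * dpaa2_dev_tx_conf() later reads back as annotation->word2.
 */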
1225e806bf87SPriyanka Jain /* enable update of confirmation frame annotation */ 1226e806bf87SPriyanka Jain fd_faead = (struct dpaa2_faead *)((size_t) 1227e806bf87SPriyanka Jain DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + 1228e806bf87SPriyanka Jain DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET); 1229e806bf87SPriyanka Jain fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV | 1230e806bf87SPriyanka Jain DPAA2_ANNOT_FAEAD_UPD; 1231e806bf87SPriyanka Jain } 1232e806bf87SPriyanka Jain 1233cd9935ceSHemant Agrawal /* 1234cd9935ceSHemant Agrawal * Callback to handle sending packets through WRIOP based interface 1235cd9935ceSHemant Agrawal */ 1236cd9935ceSHemant Agrawal uint16_t 1237cd9935ceSHemant Agrawal dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 1238cd9935ceSHemant Agrawal { 1239cd9935ceSHemant Agrawal /* Function to transmit the frames to the given device and VQ */ 1240a0840963SHemant Agrawal uint32_t loop, retry_count; 1241cd9935ceSHemant Agrawal int32_t ret; 1242cd9935ceSHemant Agrawal struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1243774e9ea9SHemant Agrawal struct rte_mbuf *mi; 1244cd9935ceSHemant Agrawal uint32_t frames_to_send; 1245cd9935ceSHemant Agrawal struct rte_mempool *mp; 1246cd9935ceSHemant Agrawal struct qbman_eq_desc eqdesc; 1247cd9935ceSHemant Agrawal struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 1248cd9935ceSHemant Agrawal struct qbman_swp *swp; 1249cd9935ceSHemant Agrawal uint16_t num_tx = 0; 1250cd9935ceSHemant Agrawal uint16_t bpid; 125185ee5ddaSShreyansh Jain struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; 125285ee5ddaSShreyansh Jain struct dpaa2_dev_priv *priv = eth_data->dev_private; 12532d378863SNipun Gupta uint32_t flags[MAX_TX_RING_SLOTS] = {0}; 1254b0074a7bSGagandeep Singh struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size]; 1255b0074a7bSGagandeep Singh uint32_t free_count = 0; 1256cd9935ceSHemant Agrawal 1257cd9935ceSHemant Agrawal if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 1258cd9935ceSHemant Agrawal ret = dpaa2_affine_qbman_swp(); 1259cd9935ceSHemant Agrawal if (ret) { 1260d527f5d9SNipun Gupta DPAA2_PMD_ERR( 1261f665790aSDavid Marchand "Failed to allocate IO portal, tid: %d", 1262d527f5d9SNipun Gupta rte_gettid()); 1263cd9935ceSHemant Agrawal return 0; 1264cd9935ceSHemant Agrawal } 1265cd9935ceSHemant Agrawal } 1266cd9935ceSHemant Agrawal swp = DPAA2_PER_LCORE_PORTAL; 1267cd9935ceSHemant Agrawal 12682b843cacSDavid Marchand DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d", 126985ee5ddaSShreyansh Jain eth_data, dpaa2_q->fqid); 1270cd9935ceSHemant Agrawal 1271e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588 1272e806bf87SPriyanka Jain /* The IEEE1588 driver needs a pointer to the Tx confirmation 1273e806bf87SPriyanka Jain * queue corresponding to the last transmitted packet, for 1274e806bf87SPriyanka Jain * reading the timestamp 1275e806bf87SPriyanka Jain */ 127602548404SGagandeep Singh if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) { 1277e806bf87SPriyanka Jain priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue; 1278e806bf87SPriyanka Jain dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue); 127990762e5cSVanshika Shukla priv->tx_timestamp = 0; 128090762e5cSVanshika Shukla } 1281e806bf87SPriyanka Jain #endif 1282e806bf87SPriyanka Jain 1283cd9935ceSHemant Agrawal /*Prepare enqueue descriptor*/ 1284cd9935ceSHemant Agrawal qbman_eq_desc_clear(&eqdesc); 1285cd9935ceSHemant Agrawal qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 1286e26bf82eSSachin Saxena qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid); 1287e26bf82eSSachin Saxena
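/*
 * Per-mbuf FD conversion strategy used in the loop below:
 *  - a direct, single-segment, refcnt==1 mbuf from this port's DPAA2
 *    pool takes the contiguous-FD fast path;
 *  - an mbuf carrying an external buffer is built into an S/G or
 *    contiguous FD, with its segments parked on buf_to_free for
 *    deferred release after the enqueue outcome is known;
 *  - an mbuf from a non-DPAA2 pool is copied into the default buffer
 *    pool attached to this interface and the original is freed.
 */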
1288cd9935ceSHemant Agrawal /*Clear the unused FD fields before sending*/ 1289cd9935ceSHemant Agrawal while (nb_pkts) { 12907ae777d0SHemant Agrawal /*Check if the queue is congested*/ 1291a0840963SHemant Agrawal retry_count = 0; 129269293c77SHemant Agrawal while (qbman_result_SCN_state(dpaa2_q->cscn)) { 1293a0840963SHemant Agrawal retry_count++; 1294a0840963SHemant Agrawal /* Retry for some time before giving up */ 1295a0840963SHemant Agrawal if (retry_count > CONG_RETRY_COUNT) 12967ae777d0SHemant Agrawal goto skip_tx; 1297a0840963SHemant Agrawal } 12987ae777d0SHemant Agrawal 1299bd23b1a8SNipun Gupta frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? 1300bd23b1a8SNipun Gupta dpaa2_eqcr_size : nb_pkts; 1301cd9935ceSHemant Agrawal 1302cd9935ceSHemant Agrawal for (loop = 0; loop < frames_to_send; loop++) { 1303ea278063SDavid Marchand if (*dpaa2_seqn(*bufs)) { 1304ea278063SDavid Marchand uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1; 13052d378863SNipun Gupta 13062d378863SNipun Gupta flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | 13072d378863SNipun Gupta dqrr_index; 13082d378863SNipun Gupta DPAA2_PER_LCORE_DQRR_SIZE--; 13092d378863SNipun Gupta DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); 1310ea278063SDavid Marchand *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN; 13112d378863SNipun Gupta } 13122d378863SNipun Gupta 131348e7f156SNipun Gupta if (likely(RTE_MBUF_DIRECT(*bufs))) { 1314cd9935ceSHemant Agrawal mp = (*bufs)->pool; 131548e7f156SNipun Gupta /* Check the basic scenario and set 131648e7f156SNipun Gupta * the FD appropriately here itself. 131748e7f156SNipun Gupta */ 131848e7f156SNipun Gupta if (likely(mp && mp->ops_index == 131948e7f156SNipun Gupta priv->bp_list->dpaa2_ops_index && 132048e7f156SNipun Gupta (*bufs)->nb_segs == 1 && 132148e7f156SNipun Gupta rte_mbuf_refcnt_read((*bufs)) == 1)) { 13220ebce612SSunil Kumar Kori if (unlikely(((*bufs)->ol_flags 1323daa02b5cSOlivier Matz & RTE_MBUF_F_TX_VLAN) || 132485ee5ddaSShreyansh Jain (eth_data->dev_conf.txmode.offloads 1325295968d1SFerruh Yigit & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) { 132648e7f156SNipun Gupta ret = rte_vlan_insert(bufs); 132748e7f156SNipun Gupta if (ret) 132848e7f156SNipun Gupta goto send_n_return; 132948e7f156SNipun Gupta } 133048e7f156SNipun Gupta DPAA2_MBUF_TO_CONTIG_FD((*bufs), 133148e7f156SNipun Gupta &fd_arr[loop], mempool_to_bpid(mp)); 13323fa54e3dSGagandeep Singh #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 13333fa54e3dSGagandeep Singh rte_mempool_check_cookies 13343fa54e3dSGagandeep Singh (rte_mempool_from_obj((void *)*bufs), 13353fa54e3dSGagandeep Singh (void **)bufs, 1, 0); 13363fa54e3dSGagandeep Singh #endif 133748e7f156SNipun Gupta bufs++; 1338e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588 1339e806bf87SPriyanka Jain enable_tx_tstamp(&fd_arr[loop]); 1340e806bf87SPriyanka Jain #endif 134148e7f156SNipun Gupta continue; 134248e7f156SNipun Gupta } 1343774e9ea9SHemant Agrawal } else { 1344774e9ea9SHemant Agrawal mi = rte_mbuf_from_indirect(*bufs); 1345774e9ea9SHemant Agrawal mp = mi->pool; 1346774e9ea9SHemant Agrawal } 13476bfbafe1SNipun Gupta 13486bfbafe1SNipun Gupta if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) { 13496bfbafe1SNipun Gupta if (unlikely((*bufs)->nb_segs > 1)) { 135075e2a1d4SGagandeep Singh mp = (*bufs)->pool; 13516bfbafe1SNipun Gupta if (eth_mbuf_to_sg_fd(*bufs, 13526bfbafe1SNipun Gupta &fd_arr[loop], 1353b0074a7bSGagandeep Singh buf_to_free, 1354b0074a7bSGagandeep Singh &free_count, 1355b0074a7bSGagandeep Singh loop, 135675e2a1d4SGagandeep Singh mempool_to_bpid(mp))) 13576bfbafe1SNipun Gupta goto send_n_return; 13586bfbafe1SNipun 
Gupta } else { 13596bfbafe1SNipun Gupta eth_mbuf_to_fd(*bufs, 1360b0074a7bSGagandeep Singh &fd_arr[loop], 1361b0074a7bSGagandeep Singh buf_to_free, 1362b0074a7bSGagandeep Singh &free_count, 1363b0074a7bSGagandeep Singh loop, 0); 13646bfbafe1SNipun Gupta } 13656bfbafe1SNipun Gupta bufs++; 13666bfbafe1SNipun Gupta #ifdef RTE_LIBRTE_IEEE1588 13676bfbafe1SNipun Gupta enable_tx_tstamp(&fd_arr[loop]); 13686bfbafe1SNipun Gupta #endif 13696bfbafe1SNipun Gupta continue; 13706bfbafe1SNipun Gupta } 13716bfbafe1SNipun Gupta 13729e5f3e6dSHemant Agrawal /* Not a hw_pkt pool allocated frame */ 1373790ec226SHemant Agrawal if (unlikely(!mp || !priv->bp_list)) { 1374a10a988aSShreyansh Jain DPAA2_PMD_ERR("Err: No buffer pool attached"); 1375790ec226SHemant Agrawal goto send_n_return; 1376774e9ea9SHemant Agrawal } 1377790ec226SHemant Agrawal 1378daa02b5cSOlivier Matz if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) || 137985ee5ddaSShreyansh Jain (eth_data->dev_conf.txmode.offloads 1380295968d1SFerruh Yigit & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) { 13810ebce612SSunil Kumar Kori int ret = rte_vlan_insert(bufs); 13820ebce612SSunil Kumar Kori if (ret) 13830ebce612SSunil Kumar Kori goto send_n_return; 13840ebce612SSunil Kumar Kori } 13859e5f3e6dSHemant Agrawal if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { 1386a10a988aSShreyansh Jain DPAA2_PMD_WARN("Non DPAA2 buffer pool"); 13879e5f3e6dSHemant Agrawal /* alloc should be from the default buffer pool 13889e5f3e6dSHemant Agrawal * attached to this interface 13899e5f3e6dSHemant Agrawal */ 13909e5f3e6dSHemant Agrawal bpid = priv->bp_list->buf_pool.bpid; 1391790ec226SHemant Agrawal 1392774e9ea9SHemant Agrawal if (unlikely((*bufs)->nb_segs > 1)) { 1393a10a988aSShreyansh Jain DPAA2_PMD_ERR("S/G support not added" 1394774e9ea9SHemant Agrawal " for non hw offload buffer"); 1395790ec226SHemant Agrawal goto send_n_return; 1396774e9ea9SHemant Agrawal } 13979e5f3e6dSHemant Agrawal if (eth_copy_mbuf_to_fd(*bufs, 13989e5f3e6dSHemant Agrawal &fd_arr[loop], bpid)) { 1399790ec226SHemant Agrawal goto send_n_return; 14009e5f3e6dSHemant Agrawal } 1401790ec226SHemant Agrawal /* free the original packet */ 1402790ec226SHemant Agrawal rte_pktmbuf_free(*bufs); 14039e5f3e6dSHemant Agrawal } else { 1404cd9935ceSHemant Agrawal bpid = mempool_to_bpid(mp); 1405774e9ea9SHemant Agrawal if (unlikely((*bufs)->nb_segs > 1)) { 1406774e9ea9SHemant Agrawal if (eth_mbuf_to_sg_fd(*bufs, 1407cc8569f0SHemant Agrawal &fd_arr[loop], 1408b0074a7bSGagandeep Singh buf_to_free, 1409b0074a7bSGagandeep Singh &free_count, 1410b0074a7bSGagandeep Singh loop, 141175e2a1d4SGagandeep Singh bpid)) 1412790ec226SHemant Agrawal goto send_n_return; 1413774e9ea9SHemant Agrawal } else { 1414774e9ea9SHemant Agrawal eth_mbuf_to_fd(*bufs, 1415b0074a7bSGagandeep Singh &fd_arr[loop], 1416b0074a7bSGagandeep Singh buf_to_free, 1417b0074a7bSGagandeep Singh &free_count, 1418b0074a7bSGagandeep Singh loop, bpid); 1419774e9ea9SHemant Agrawal } 14209e5f3e6dSHemant Agrawal } 1421e806bf87SPriyanka Jain #ifdef RTE_LIBRTE_IEEE1588 1422e806bf87SPriyanka Jain enable_tx_tstamp(&fd_arr[loop]); 1423e806bf87SPriyanka Jain #endif 1424cd9935ceSHemant Agrawal bufs++; 1425cd9935ceSHemant Agrawal } 1426ce4fd609SNipun Gupta 1427cd9935ceSHemant Agrawal loop = 0; 1428ce4fd609SNipun Gupta retry_count = 0; 1429cd9935ceSHemant Agrawal while (loop < frames_to_send) { 1430ce4fd609SNipun Gupta ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 14312d378863SNipun Gupta &fd_arr[loop], &flags[loop], 1432496324d2SNipun Gupta frames_to_send - loop); 
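/* qbman_swp_enqueue_multiple() returns the number of FDs the portal
 * accepted, or a negative value when it could not accept any
 * (typically no free EQCR slots); the bounded retry below keeps a
 * persistently busy portal from stalling the caller forever.
 */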
1433ce4fd609SNipun Gupta if (unlikely(ret < 0)) { 1434ce4fd609SNipun Gupta retry_count++; 1435ce4fd609SNipun Gupta if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1436ce4fd609SNipun Gupta num_tx += loop; 1437ce4fd609SNipun Gupta nb_pkts -= loop; 1438ce4fd609SNipun Gupta goto send_n_return; 1439ce4fd609SNipun Gupta } 1440ce4fd609SNipun Gupta } else { 1441ce4fd609SNipun Gupta loop += ret; 1442ce4fd609SNipun Gupta retry_count = 0; 1443ce4fd609SNipun Gupta } 1444cd9935ceSHemant Agrawal } 1445cd9935ceSHemant Agrawal 1446ce4fd609SNipun Gupta num_tx += loop; 1447ce4fd609SNipun Gupta nb_pkts -= loop; 1448cd9935ceSHemant Agrawal } 144948e7f156SNipun Gupta dpaa2_q->tx_pkts += num_tx; 14506bfbafe1SNipun Gupta 1451b0074a7bSGagandeep Singh for (loop = 0; loop < free_count; loop++) { 1452b0074a7bSGagandeep Singh if (buf_to_free[loop].pkt_id < num_tx) 1453b0074a7bSGagandeep Singh rte_pktmbuf_free_seg(buf_to_free[loop].seg); 14546bfbafe1SNipun Gupta } 14556bfbafe1SNipun Gupta 1456790ec226SHemant Agrawal return num_tx; 1457790ec226SHemant Agrawal 1458790ec226SHemant Agrawal send_n_return: 1459790ec226SHemant Agrawal /* send any already prepared fd */ 1460790ec226SHemant Agrawal if (loop) { 1461790ec226SHemant Agrawal unsigned int i = 0; 1462790ec226SHemant Agrawal 1463ce4fd609SNipun Gupta retry_count = 0; 1464790ec226SHemant Agrawal while (i < loop) { 1465ce4fd609SNipun Gupta ret = qbman_swp_enqueue_multiple(swp, &eqdesc, 14662d378863SNipun Gupta &fd_arr[i], 1467ce4fd609SNipun Gupta &flags[i], 1468496324d2SNipun Gupta loop - i); 1469ce4fd609SNipun Gupta if (unlikely(ret < 0)) { 1470ce4fd609SNipun Gupta retry_count++; 1471ce4fd609SNipun Gupta if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) 1472ce4fd609SNipun Gupta break; 1473ce4fd609SNipun Gupta } else { 1474ce4fd609SNipun Gupta i += ret; 1475ce4fd609SNipun Gupta retry_count = 0; 1476790ec226SHemant Agrawal } 1477ce4fd609SNipun Gupta } 1478ce4fd609SNipun Gupta num_tx += i; 1479790ec226SHemant Agrawal } 14809e5f3e6dSHemant Agrawal skip_tx: 148148e7f156SNipun Gupta dpaa2_q->tx_pkts += num_tx; 14826bfbafe1SNipun Gupta 1483b0074a7bSGagandeep Singh for (loop = 0; loop < free_count; loop++) { 1484b0074a7bSGagandeep Singh if (buf_to_free[loop].pkt_id < num_tx) 1485b0074a7bSGagandeep Singh rte_pktmbuf_free_seg(buf_to_free[loop].seg); 14866bfbafe1SNipun Gupta } 14876bfbafe1SNipun Gupta 1488cd9935ceSHemant Agrawal return num_tx; 1489cd9935ceSHemant Agrawal } 1490a1f3a12cSHemant Agrawal 149116c4a3c4SNipun Gupta void 149295af364bSGagandeep Singh dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, 149395af364bSGagandeep Singh __rte_unused struct dpaa2_queue *dpaa2_q) 149416c4a3c4SNipun Gupta { 149516c4a3c4SNipun Gupta struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; 149616c4a3c4SNipun Gupta struct qbman_fd *fd; 149716c4a3c4SNipun Gupta struct rte_mbuf *m; 149816c4a3c4SNipun Gupta 149916c4a3c4SNipun Gupta fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]); 1500005d943eSNipun Gupta 1501005d943eSNipun Gupta /* Setting port id does not matter as we are to free the mbuf */ 1502005d943eSNipun Gupta m = eth_fd_to_mbuf(fd, 0); 150316c4a3c4SNipun Gupta rte_pktmbuf_free(m); 150416c4a3c4SNipun Gupta } 150516c4a3c4SNipun Gupta 150616c4a3c4SNipun Gupta static void 150716c4a3c4SNipun Gupta dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q, 150816c4a3c4SNipun Gupta struct rte_mbuf *m, 150916c4a3c4SNipun Gupta struct qbman_eq_desc *eqdesc) 151016c4a3c4SNipun Gupta { 151116c4a3c4SNipun Gupta struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; 151216c4a3c4SNipun Gupta struct 
dpaa2_dev_priv *priv = eth_data->dev_private; 151316c4a3c4SNipun Gupta struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; 151416c4a3c4SNipun Gupta struct eqresp_metadata *eqresp_meta; 151516c4a3c4SNipun Gupta uint16_t orpid, seqnum; 151616c4a3c4SNipun Gupta uint8_t dq_idx; 151716c4a3c4SNipun Gupta 1518e26bf82eSSachin Saxena qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid); 151916c4a3c4SNipun Gupta 1520ea278063SDavid Marchand if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) { 1521ea278063SDavid Marchand orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >> 152216c4a3c4SNipun Gupta DPAA2_EQCR_OPRID_SHIFT; 1523ea278063SDavid Marchand seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >> 152416c4a3c4SNipun Gupta DPAA2_EQCR_SEQNUM_SHIFT; 152516c4a3c4SNipun Gupta 152616c4a3c4SNipun Gupta if (!priv->en_loose_ordered) { 152716c4a3c4SNipun Gupta qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0); 152816c4a3c4SNipun Gupta qbman_eq_desc_set_response(eqdesc, (uint64_t) 152916c4a3c4SNipun Gupta DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[ 153016c4a3c4SNipun Gupta dpio_dev->eqresp_pi]), 1); 153116c4a3c4SNipun Gupta qbman_eq_desc_set_token(eqdesc, 1); 153216c4a3c4SNipun Gupta 153316c4a3c4SNipun Gupta eqresp_meta = &dpio_dev->eqresp_meta[ 153416c4a3c4SNipun Gupta dpio_dev->eqresp_pi]; 153516c4a3c4SNipun Gupta eqresp_meta->dpaa2_q = dpaa2_q; 153616c4a3c4SNipun Gupta eqresp_meta->mp = m->pool; 153716c4a3c4SNipun Gupta 153816c4a3c4SNipun Gupta dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ? 153916c4a3c4SNipun Gupta dpio_dev->eqresp_pi++ : 154016c4a3c4SNipun Gupta (dpio_dev->eqresp_pi = 0); 154116c4a3c4SNipun Gupta } else { 154216c4a3c4SNipun Gupta qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0); 154316c4a3c4SNipun Gupta } 154416c4a3c4SNipun Gupta } else { 1545ea278063SDavid Marchand dq_idx = *dpaa2_seqn(m) - 1; 154616c4a3c4SNipun Gupta qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0); 154716c4a3c4SNipun Gupta DPAA2_PER_LCORE_DQRR_SIZE--; 154816c4a3c4SNipun Gupta DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx); 154916c4a3c4SNipun Gupta } 1550ea278063SDavid Marchand *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN; 155116c4a3c4SNipun Gupta } 155216c4a3c4SNipun Gupta 1553ed1cdbedSJun Yang uint16_t 1554ed1cdbedSJun Yang dpaa2_dev_tx_multi_txq_ordered(void **queue, 1555ed1cdbedSJun Yang struct rte_mbuf **bufs, uint16_t nb_pkts) 1556ed1cdbedSJun Yang { 1557ed1cdbedSJun Yang /* Function to transmit frames to multiple queues, one queue per packet. */ 1558b0074a7bSGagandeep Singh uint32_t loop, i, retry_count; 1559ed1cdbedSJun Yang int32_t ret; 1560ed1cdbedSJun Yang struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 1561fb2790a5SBrick Yang uint32_t frames_to_send, num_free_eq_desc = 0; 1562ed1cdbedSJun Yang struct rte_mempool *mp; 1563ed1cdbedSJun Yang struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS]; 1564ed1cdbedSJun Yang struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS]; 1565ed1cdbedSJun Yang struct qbman_swp *swp; 1566ed1cdbedSJun Yang uint16_t bpid; 1567ed1cdbedSJun Yang struct rte_mbuf *mi; 1568ed1cdbedSJun Yang struct rte_eth_dev_data *eth_data; 1569ed1cdbedSJun Yang struct dpaa2_dev_priv *priv; 1570ed1cdbedSJun Yang struct dpaa2_queue *order_sendq; 1571b0074a7bSGagandeep Singh struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size]; 1572b0074a7bSGagandeep Singh uint32_t free_count = 0; 1573ed1cdbedSJun Yang 1574ed1cdbedSJun Yang if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 1575ed1cdbedSJun Yang ret = dpaa2_affine_qbman_swp(); 1576ed1cdbedSJun Yang if (ret) { 1577ed1cdbedSJun Yang DPAA2_PMD_ERR( 1578f665790aSDavid Marchand "Failed to allocate IO portal,
tid: %d", 1579ed1cdbedSJun Yang rte_gettid()); 1580ed1cdbedSJun Yang return 0; 1581ed1cdbedSJun Yang } 1582ed1cdbedSJun Yang } 1583ed1cdbedSJun Yang swp = DPAA2_PER_LCORE_PORTAL; 1584ed1cdbedSJun Yang 1585fb2790a5SBrick Yang frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? 1586fb2790a5SBrick Yang dpaa2_eqcr_size : nb_pkts; 1587fb2790a5SBrick Yang 1588fb2790a5SBrick Yang for (loop = 0; loop < frames_to_send; loop++) { 1589ed1cdbedSJun Yang dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop]; 1590ed1cdbedSJun Yang eth_data = dpaa2_q[loop]->eth_data; 1591ed1cdbedSJun Yang priv = eth_data->dev_private; 1592fb2790a5SBrick Yang if (!priv->en_loose_ordered) { 1593fb2790a5SBrick Yang if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) { 1594fb2790a5SBrick Yang if (!num_free_eq_desc) { 1595fb2790a5SBrick Yang num_free_eq_desc = dpaa2_free_eq_descriptors(); 1596fb2790a5SBrick Yang if (!num_free_eq_desc) 1597fb2790a5SBrick Yang goto send_frames; 1598fb2790a5SBrick Yang } 1599fb2790a5SBrick Yang num_free_eq_desc--; 1600fb2790a5SBrick Yang } 1601fb2790a5SBrick Yang } 1602fb2790a5SBrick Yang 16032b843cacSDavid Marchand DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d", 1604fb2790a5SBrick Yang eth_data, dpaa2_q[loop]->fqid); 1605fb2790a5SBrick Yang 1606fb2790a5SBrick Yang /* Check if the queue is congested */ 1607fb2790a5SBrick Yang retry_count = 0; 1608fb2790a5SBrick Yang while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) { 1609fb2790a5SBrick Yang retry_count++; 1610fb2790a5SBrick Yang /* Retry for some time before giving up */ 1611fb2790a5SBrick Yang if (retry_count > CONG_RETRY_COUNT) 1612fb2790a5SBrick Yang goto send_frames; 1613fb2790a5SBrick Yang } 1614fb2790a5SBrick Yang 1615fb2790a5SBrick Yang /* Prepare enqueue descriptor */ 1616ed1cdbedSJun Yang qbman_eq_desc_clear(&eqdesc[loop]); 1617fb2790a5SBrick Yang 1618ed1cdbedSJun Yang if (*dpaa2_seqn(*bufs) && priv->en_ordered) { 1619ed1cdbedSJun Yang order_sendq = (struct dpaa2_queue *)priv->tx_vq[0]; 1620ed1cdbedSJun Yang dpaa2_set_enqueue_descriptor(order_sendq, 1621ed1cdbedSJun Yang (*bufs), 1622ed1cdbedSJun Yang &eqdesc[loop]); 1623ed1cdbedSJun Yang } else { 1624ed1cdbedSJun Yang qbman_eq_desc_set_no_orp(&eqdesc[loop], 1625ed1cdbedSJun Yang DPAA2_EQ_RESP_ERR_FQ); 1626ed1cdbedSJun Yang qbman_eq_desc_set_fq(&eqdesc[loop], 1627ed1cdbedSJun Yang dpaa2_q[loop]->fqid); 1628ed1cdbedSJun Yang } 1629ed1cdbedSJun Yang 1630ed1cdbedSJun Yang if (likely(RTE_MBUF_DIRECT(*bufs))) { 1631ed1cdbedSJun Yang mp = (*bufs)->pool; 1632ed1cdbedSJun Yang /* Check the basic scenario and set 1633ed1cdbedSJun Yang * the FD appropriately here itself. 
1634ed1cdbedSJun Yang */ 1635ed1cdbedSJun Yang if (likely(mp && mp->ops_index == 1636ed1cdbedSJun Yang priv->bp_list->dpaa2_ops_index && 1637ed1cdbedSJun Yang (*bufs)->nb_segs == 1 && 1638ed1cdbedSJun Yang rte_mbuf_refcnt_read((*bufs)) == 1)) { 1639ed1cdbedSJun Yang if (unlikely((*bufs)->ol_flags 1640ed1cdbedSJun Yang & RTE_MBUF_F_TX_VLAN)) { 1641ed1cdbedSJun Yang ret = rte_vlan_insert(bufs); 1642ed1cdbedSJun Yang if (ret) 1643ed1cdbedSJun Yang goto send_frames; 1644ed1cdbedSJun Yang } 1645ed1cdbedSJun Yang DPAA2_MBUF_TO_CONTIG_FD((*bufs), 1646ed1cdbedSJun Yang &fd_arr[loop], 1647ed1cdbedSJun Yang mempool_to_bpid(mp)); 1648ed1cdbedSJun Yang bufs++; 1649ed1cdbedSJun Yang continue; 1650ed1cdbedSJun Yang } 1651ed1cdbedSJun Yang } else { 1652ed1cdbedSJun Yang mi = rte_mbuf_from_indirect(*bufs); 1653ed1cdbedSJun Yang mp = mi->pool; 1654ed1cdbedSJun Yang } 1655ed1cdbedSJun Yang /* Not a hw_pkt pool allocated frame */ 1656ed1cdbedSJun Yang if (unlikely(!mp || !priv->bp_list)) { 1657ed1cdbedSJun Yang DPAA2_PMD_ERR("Err: No buffer pool attached"); 1658ed1cdbedSJun Yang goto send_frames; 1659ed1cdbedSJun Yang } 1660ed1cdbedSJun Yang 1661ed1cdbedSJun Yang if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { 1662ed1cdbedSJun Yang DPAA2_PMD_WARN("Non DPAA2 buffer pool"); 1663ed1cdbedSJun Yang /* alloc should be from the default buffer pool 1664ed1cdbedSJun Yang * attached to this interface 1665ed1cdbedSJun Yang */ 1666ed1cdbedSJun Yang bpid = priv->bp_list->buf_pool.bpid; 1667ed1cdbedSJun Yang 1668ed1cdbedSJun Yang if (unlikely((*bufs)->nb_segs > 1)) { 1669ed1cdbedSJun Yang DPAA2_PMD_ERR( 1670ed1cdbedSJun Yang "S/G not supp for non hw offload buffer"); 1671ed1cdbedSJun Yang goto send_frames; 1672ed1cdbedSJun Yang } 1673ed1cdbedSJun Yang if (eth_copy_mbuf_to_fd(*bufs, 1674ed1cdbedSJun Yang &fd_arr[loop], bpid)) { 1675ed1cdbedSJun Yang goto send_frames; 1676ed1cdbedSJun Yang } 1677ed1cdbedSJun Yang /* free the original packet */ 1678ed1cdbedSJun Yang rte_pktmbuf_free(*bufs); 1679ed1cdbedSJun Yang } else { 1680ed1cdbedSJun Yang bpid = mempool_to_bpid(mp); 1681ed1cdbedSJun Yang if (unlikely((*bufs)->nb_segs > 1)) { 1682ed1cdbedSJun Yang if (eth_mbuf_to_sg_fd(*bufs, 1683ed1cdbedSJun Yang &fd_arr[loop], 1684b0074a7bSGagandeep Singh buf_to_free, 1685b0074a7bSGagandeep Singh &free_count, 1686b0074a7bSGagandeep Singh loop, 1687ed1cdbedSJun Yang bpid)) 1688ed1cdbedSJun Yang goto send_frames; 1689ed1cdbedSJun Yang } else { 1690ed1cdbedSJun Yang eth_mbuf_to_fd(*bufs, 1691b0074a7bSGagandeep Singh &fd_arr[loop], 1692b0074a7bSGagandeep Singh buf_to_free, 1693b0074a7bSGagandeep Singh &free_count, 1694b0074a7bSGagandeep Singh loop, bpid); 1695ed1cdbedSJun Yang } 1696ed1cdbedSJun Yang } 1697ed1cdbedSJun Yang 1698ed1cdbedSJun Yang bufs++; 1699ed1cdbedSJun Yang } 1700ed1cdbedSJun Yang 1701ed1cdbedSJun Yang send_frames: 1702ed1cdbedSJun Yang frames_to_send = loop; 1703ed1cdbedSJun Yang loop = 0; 1704fb2790a5SBrick Yang retry_count = 0; 1705ed1cdbedSJun Yang while (loop < frames_to_send) { 1706ed1cdbedSJun Yang ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop], 1707ed1cdbedSJun Yang &fd_arr[loop], 1708ed1cdbedSJun Yang frames_to_send - loop); 1709ed1cdbedSJun Yang if (likely(ret > 0)) { 1710ed1cdbedSJun Yang loop += ret; 1711fb2790a5SBrick Yang retry_count = 0; 1712ed1cdbedSJun Yang } else { 1713ed1cdbedSJun Yang retry_count++; 1714ed1cdbedSJun Yang if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) 1715ed1cdbedSJun Yang break; 1716ed1cdbedSJun Yang } 1717ed1cdbedSJun Yang } 1718ed1cdbedSJun Yang 1719b0074a7bSGagandeep 
Singh for (i = 0; i < free_count; i++) { 1720b0074a7bSGagandeep Singh if (buf_to_free[i].pkt_id < loop) 1721b0074a7bSGagandeep Singh rte_pktmbuf_free_seg(buf_to_free[i].seg); 1722b0074a7bSGagandeep Singh } 1723ed1cdbedSJun Yang return loop; 1724ed1cdbedSJun Yang } 1725ed1cdbedSJun Yang 172616c4a3c4SNipun Gupta /* Callback to handle sending ordered packets through WRIOP based interface */ 172716c4a3c4SNipun Gupta uint16_t 172816c4a3c4SNipun Gupta dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) 172916c4a3c4SNipun Gupta { 173016c4a3c4SNipun Gupta /* Function to transmit the frames to given device and VQ*/ 173116c4a3c4SNipun Gupta struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 173216c4a3c4SNipun Gupta struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; 173316c4a3c4SNipun Gupta struct dpaa2_dev_priv *priv = eth_data->dev_private; 173416c4a3c4SNipun Gupta struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0]; 173516c4a3c4SNipun Gupta struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; 173616c4a3c4SNipun Gupta struct rte_mbuf *mi; 173716c4a3c4SNipun Gupta struct rte_mempool *mp; 173816c4a3c4SNipun Gupta struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS]; 173916c4a3c4SNipun Gupta struct qbman_swp *swp; 174016c4a3c4SNipun Gupta uint32_t frames_to_send, num_free_eq_desc; 174116c4a3c4SNipun Gupta uint32_t loop, retry_count; 174216c4a3c4SNipun Gupta int32_t ret; 174316c4a3c4SNipun Gupta uint16_t num_tx = 0; 174416c4a3c4SNipun Gupta uint16_t bpid; 1745b0074a7bSGagandeep Singh struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size]; 1746b0074a7bSGagandeep Singh uint32_t free_count = 0; 174716c4a3c4SNipun Gupta 174816c4a3c4SNipun Gupta if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 174916c4a3c4SNipun Gupta ret = dpaa2_affine_qbman_swp(); 175016c4a3c4SNipun Gupta if (ret) { 1751d527f5d9SNipun Gupta DPAA2_PMD_ERR( 1752f665790aSDavid Marchand "Failed to allocate IO portal, tid: %d", 1753d527f5d9SNipun Gupta rte_gettid()); 175416c4a3c4SNipun Gupta return 0; 175516c4a3c4SNipun Gupta } 175616c4a3c4SNipun Gupta } 175716c4a3c4SNipun Gupta swp = DPAA2_PER_LCORE_PORTAL; 175816c4a3c4SNipun Gupta 17592b843cacSDavid Marchand DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d", 176016c4a3c4SNipun Gupta eth_data, dpaa2_q->fqid); 176116c4a3c4SNipun Gupta 176216c4a3c4SNipun Gupta /* This would also handle normal and atomic queues as any type 176316c4a3c4SNipun Gupta * of packet can be enqueued when ordered queues are being used. 176416c4a3c4SNipun Gupta */ 176516c4a3c4SNipun Gupta while (nb_pkts) { 176616c4a3c4SNipun Gupta /*Check if the queue is congested*/ 176716c4a3c4SNipun Gupta retry_count = 0; 176816c4a3c4SNipun Gupta while (qbman_result_SCN_state(dpaa2_q->cscn)) { 176916c4a3c4SNipun Gupta retry_count++; 177016c4a3c4SNipun Gupta /* Retry for some time before giving up */ 177116c4a3c4SNipun Gupta if (retry_count > CONG_RETRY_COUNT) 177216c4a3c4SNipun Gupta goto skip_tx; 177316c4a3c4SNipun Gupta } 177416c4a3c4SNipun Gupta 177516c4a3c4SNipun Gupta frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? 
177616c4a3c4SNipun Gupta dpaa2_eqcr_size : nb_pkts; 177716c4a3c4SNipun Gupta 177816c4a3c4SNipun Gupta if (!priv->en_loose_ordered) { 1779ea278063SDavid Marchand if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) { 178016c4a3c4SNipun Gupta num_free_eq_desc = dpaa2_free_eq_descriptors(); 178116c4a3c4SNipun Gupta if (num_free_eq_desc < frames_to_send) 178216c4a3c4SNipun Gupta frames_to_send = num_free_eq_desc; 178316c4a3c4SNipun Gupta } 178416c4a3c4SNipun Gupta } 178516c4a3c4SNipun Gupta 178616c4a3c4SNipun Gupta for (loop = 0; loop < frames_to_send; loop++) { 178716c4a3c4SNipun Gupta /*Prepare enqueue descriptor*/ 178816c4a3c4SNipun Gupta qbman_eq_desc_clear(&eqdesc[loop]); 178916c4a3c4SNipun Gupta 1790ea278063SDavid Marchand if (*dpaa2_seqn(*bufs)) { 179116c4a3c4SNipun Gupta /* Use only queue 0 for Tx in case of atomic/ 179216c4a3c4SNipun Gupta * ordered packets as packets can get unordered 17937be78d02SJosh Soref * when being transmitted out from the interface 179416c4a3c4SNipun Gupta */ 179516c4a3c4SNipun Gupta dpaa2_set_enqueue_descriptor(order_sendq, 179616c4a3c4SNipun Gupta (*bufs), 179716c4a3c4SNipun Gupta &eqdesc[loop]); 179816c4a3c4SNipun Gupta } else { 179916c4a3c4SNipun Gupta qbman_eq_desc_set_no_orp(&eqdesc[loop], 180016c4a3c4SNipun Gupta DPAA2_EQ_RESP_ERR_FQ); 1801e26bf82eSSachin Saxena qbman_eq_desc_set_fq(&eqdesc[loop], 1802e26bf82eSSachin Saxena dpaa2_q->fqid); 180316c4a3c4SNipun Gupta } 180416c4a3c4SNipun Gupta 180516c4a3c4SNipun Gupta if (likely(RTE_MBUF_DIRECT(*bufs))) { 180616c4a3c4SNipun Gupta mp = (*bufs)->pool; 180716c4a3c4SNipun Gupta /* Check the basic scenario and set 180816c4a3c4SNipun Gupta * the FD appropriately here itself. 180916c4a3c4SNipun Gupta */ 181016c4a3c4SNipun Gupta if (likely(mp && mp->ops_index == 181116c4a3c4SNipun Gupta priv->bp_list->dpaa2_ops_index && 181216c4a3c4SNipun Gupta (*bufs)->nb_segs == 1 && 181316c4a3c4SNipun Gupta rte_mbuf_refcnt_read((*bufs)) == 1)) { 181416c4a3c4SNipun Gupta if (unlikely((*bufs)->ol_flags 1815daa02b5cSOlivier Matz & RTE_MBUF_F_TX_VLAN)) { 181616c4a3c4SNipun Gupta ret = rte_vlan_insert(bufs); 181716c4a3c4SNipun Gupta if (ret) 181816c4a3c4SNipun Gupta goto send_n_return; 181916c4a3c4SNipun Gupta } 182016c4a3c4SNipun Gupta DPAA2_MBUF_TO_CONTIG_FD((*bufs), 182116c4a3c4SNipun Gupta &fd_arr[loop], 182216c4a3c4SNipun Gupta mempool_to_bpid(mp)); 182316c4a3c4SNipun Gupta bufs++; 182416c4a3c4SNipun Gupta continue; 182516c4a3c4SNipun Gupta } 182616c4a3c4SNipun Gupta } else { 182716c4a3c4SNipun Gupta mi = rte_mbuf_from_indirect(*bufs); 182816c4a3c4SNipun Gupta mp = mi->pool; 182916c4a3c4SNipun Gupta } 183016c4a3c4SNipun Gupta /* Not a hw_pkt pool allocated frame */ 183116c4a3c4SNipun Gupta if (unlikely(!mp || !priv->bp_list)) { 183216c4a3c4SNipun Gupta DPAA2_PMD_ERR("Err: No buffer pool attached"); 183316c4a3c4SNipun Gupta goto send_n_return; 183416c4a3c4SNipun Gupta } 183516c4a3c4SNipun Gupta 183616c4a3c4SNipun Gupta if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { 183716c4a3c4SNipun Gupta DPAA2_PMD_WARN("Non DPAA2 buffer pool"); 183816c4a3c4SNipun Gupta /* alloc should be from the default buffer pool 183916c4a3c4SNipun Gupta * attached to this interface 184016c4a3c4SNipun Gupta */ 184116c4a3c4SNipun Gupta bpid = priv->bp_list->buf_pool.bpid; 184216c4a3c4SNipun Gupta 184316c4a3c4SNipun Gupta if (unlikely((*bufs)->nb_segs > 1)) { 184416c4a3c4SNipun Gupta DPAA2_PMD_ERR( 184516c4a3c4SNipun Gupta "S/G not supp for non hw offload buffer"); 184616c4a3c4SNipun Gupta goto send_n_return; 184716c4a3c4SNipun Gupta } 184816c4a3c4SNipun 
Gupta if (eth_copy_mbuf_to_fd(*bufs, 184916c4a3c4SNipun Gupta &fd_arr[loop], bpid)) { 185016c4a3c4SNipun Gupta goto send_n_return; 185116c4a3c4SNipun Gupta } 185216c4a3c4SNipun Gupta /* free the original packet */ 185316c4a3c4SNipun Gupta rte_pktmbuf_free(*bufs); 185416c4a3c4SNipun Gupta } else { 185516c4a3c4SNipun Gupta bpid = mempool_to_bpid(mp); 185616c4a3c4SNipun Gupta if (unlikely((*bufs)->nb_segs > 1)) { 185716c4a3c4SNipun Gupta if (eth_mbuf_to_sg_fd(*bufs, 185816c4a3c4SNipun Gupta &fd_arr[loop], 1859b0074a7bSGagandeep Singh buf_to_free, 1860b0074a7bSGagandeep Singh &free_count, 1861b0074a7bSGagandeep Singh loop, 186216c4a3c4SNipun Gupta bpid)) 186316c4a3c4SNipun Gupta goto send_n_return; 186416c4a3c4SNipun Gupta } else { 186516c4a3c4SNipun Gupta eth_mbuf_to_fd(*bufs, 1866b0074a7bSGagandeep Singh &fd_arr[loop], 1867b0074a7bSGagandeep Singh buf_to_free, 1868b0074a7bSGagandeep Singh &free_count, 1869b0074a7bSGagandeep Singh loop, bpid); 187016c4a3c4SNipun Gupta } 187116c4a3c4SNipun Gupta } 187216c4a3c4SNipun Gupta bufs++; 187316c4a3c4SNipun Gupta } 1874ce4fd609SNipun Gupta 187516c4a3c4SNipun Gupta loop = 0; 1876ce4fd609SNipun Gupta retry_count = 0; 187716c4a3c4SNipun Gupta while (loop < frames_to_send) { 1878ce4fd609SNipun Gupta ret = qbman_swp_enqueue_multiple_desc(swp, 187916c4a3c4SNipun Gupta &eqdesc[loop], &fd_arr[loop], 188016c4a3c4SNipun Gupta frames_to_send - loop); 1881ce4fd609SNipun Gupta if (unlikely(ret < 0)) { 1882ce4fd609SNipun Gupta retry_count++; 1883ce4fd609SNipun Gupta if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { 1884ce4fd609SNipun Gupta num_tx += loop; 1885ce4fd609SNipun Gupta nb_pkts -= loop; 1886ce4fd609SNipun Gupta goto send_n_return; 1887ce4fd609SNipun Gupta } 1888ce4fd609SNipun Gupta } else { 1889ce4fd609SNipun Gupta loop += ret; 1890ce4fd609SNipun Gupta retry_count = 0; 1891ce4fd609SNipun Gupta } 189216c4a3c4SNipun Gupta } 189316c4a3c4SNipun Gupta 1894ce4fd609SNipun Gupta num_tx += loop; 1895ce4fd609SNipun Gupta nb_pkts -= loop; 189616c4a3c4SNipun Gupta } 189716c4a3c4SNipun Gupta dpaa2_q->tx_pkts += num_tx; 1898b0074a7bSGagandeep Singh for (loop = 0; loop < free_count; loop++) { 1899b0074a7bSGagandeep Singh if (buf_to_free[loop].pkt_id < num_tx) 1900b0074a7bSGagandeep Singh rte_pktmbuf_free_seg(buf_to_free[loop].seg); 1901b0074a7bSGagandeep Singh } 1902b0074a7bSGagandeep Singh 190316c4a3c4SNipun Gupta return num_tx; 190416c4a3c4SNipun Gupta 190516c4a3c4SNipun Gupta send_n_return: 190616c4a3c4SNipun Gupta /* send any already prepared fd */ 190716c4a3c4SNipun Gupta if (loop) { 190816c4a3c4SNipun Gupta unsigned int i = 0; 190916c4a3c4SNipun Gupta 1910ce4fd609SNipun Gupta retry_count = 0; 191116c4a3c4SNipun Gupta while (i < loop) { 1912ce4fd609SNipun Gupta ret = qbman_swp_enqueue_multiple_desc(swp, 1913fb2790a5SBrick Yang &eqdesc[i], &fd_arr[i], loop - i); 1914ce4fd609SNipun Gupta if (unlikely(ret < 0)) { 1915ce4fd609SNipun Gupta retry_count++; 1916ce4fd609SNipun Gupta if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) 1917ce4fd609SNipun Gupta break; 1918ce4fd609SNipun Gupta } else { 1919ce4fd609SNipun Gupta i += ret; 1920ce4fd609SNipun Gupta retry_count = 0; 192116c4a3c4SNipun Gupta } 1922ce4fd609SNipun Gupta } 1923ce4fd609SNipun Gupta num_tx += i; 192416c4a3c4SNipun Gupta } 192516c4a3c4SNipun Gupta skip_tx: 192616c4a3c4SNipun Gupta dpaa2_q->tx_pkts += num_tx; 1927b0074a7bSGagandeep Singh for (loop = 0; loop < free_count; loop++) { 1928b0074a7bSGagandeep Singh if (buf_to_free[loop].pkt_id < num_tx) 1929b0074a7bSGagandeep Singh rte_pktmbuf_free_seg(buf_to_free[loop].seg); 
1930b0074a7bSGagandeep Singh } 1931b0074a7bSGagandeep Singh 193216c4a3c4SNipun Gupta return num_tx; 193316c4a3c4SNipun Gupta } 193416c4a3c4SNipun Gupta 1935a3a997f0SHemant Agrawal #if defined(RTE_TOOLCHAIN_GCC) 1936a3a997f0SHemant Agrawal #pragma GCC diagnostic push 1937a3a997f0SHemant Agrawal #pragma GCC diagnostic ignored "-Wcast-qual" 1938a3a997f0SHemant Agrawal #elif defined(RTE_TOOLCHAIN_CLANG) 1939a3a997f0SHemant Agrawal #pragma clang diagnostic push 1940a3a997f0SHemant Agrawal #pragma clang diagnostic ignored "-Wcast-qual" 1941a3a997f0SHemant Agrawal #endif 1942a3a997f0SHemant Agrawal 1943a3a997f0SHemant Agrawal /* This function loops back all the received packets. */ 1944a3a997f0SHemant Agrawal uint16_t 1945a3a997f0SHemant Agrawal dpaa2_dev_loopback_rx(void *queue, 1946a3a997f0SHemant Agrawal struct rte_mbuf **bufs __rte_unused, 1947a3a997f0SHemant Agrawal uint16_t nb_pkts) 1948a3a997f0SHemant Agrawal { 1949a3a997f0SHemant Agrawal /* Function receives frames for a given device and VQ */ 1950a3a997f0SHemant Agrawal struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; 1951a3a997f0SHemant Agrawal struct qbman_result *dq_storage, *dq_storage1 = NULL; 1952a3a997f0SHemant Agrawal uint32_t fqid = dpaa2_q->fqid; 1953a3a997f0SHemant Agrawal int ret, num_rx = 0, num_tx = 0, pull_size; 1954a3a997f0SHemant Agrawal uint8_t pending, status; 1955a3a997f0SHemant Agrawal struct qbman_swp *swp; 1956a3a997f0SHemant Agrawal struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE]; 1957a3a997f0SHemant Agrawal struct qbman_pull_desc pulldesc; 1958a3a997f0SHemant Agrawal struct qbman_eq_desc eqdesc; 1959*12d98eceSJun Yang struct queue_storage_info_t *q_storage; 1960a3a997f0SHemant Agrawal struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; 1961a3a997f0SHemant Agrawal struct dpaa2_dev_priv *priv = eth_data->dev_private; 1962a3a997f0SHemant Agrawal struct dpaa2_queue *tx_q = priv->tx_vq[0]; 1963a3a997f0SHemant Agrawal /* todo - currently we are using 1st TX queue only for loopback */ 1964a3a997f0SHemant Agrawal 1965*12d98eceSJun Yang q_storage = dpaa2_q->q_storage[rte_lcore_id()]; 1966a3a997f0SHemant Agrawal if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { 1967a3a997f0SHemant Agrawal ret = dpaa2_affine_qbman_ethrx_swp(); 1968a3a997f0SHemant Agrawal if (ret) { 1969a3a997f0SHemant Agrawal DPAA2_PMD_ERR("Failure in affining portal"); 1970a3a997f0SHemant Agrawal return 0; 1971a3a997f0SHemant Agrawal } 1972a3a997f0SHemant Agrawal } 1973a3a997f0SHemant Agrawal swp = DPAA2_PER_LCORE_ETHRX_PORTAL; 1974a3a997f0SHemant Agrawal pull_size = (nb_pkts > dpaa2_dqrr_size) ?
dpaa2_dqrr_size : nb_pkts; 1975a3a997f0SHemant Agrawal if (unlikely(!q_storage->active_dqs)) { 1976a3a997f0SHemant Agrawal q_storage->toggle = 0; 1977a3a997f0SHemant Agrawal dq_storage = q_storage->dq_storage[q_storage->toggle]; 1978a3a997f0SHemant Agrawal q_storage->last_num_pkts = pull_size; 1979a3a997f0SHemant Agrawal qbman_pull_desc_clear(&pulldesc); 1980a3a997f0SHemant Agrawal qbman_pull_desc_set_numframes(&pulldesc, 1981a3a997f0SHemant Agrawal q_storage->last_num_pkts); 1982a3a997f0SHemant Agrawal qbman_pull_desc_set_fq(&pulldesc, fqid); 1983a3a997f0SHemant Agrawal qbman_pull_desc_set_storage(&pulldesc, dq_storage, 1984a3a997f0SHemant Agrawal (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); 1985a3a997f0SHemant Agrawal if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { 1986a3a997f0SHemant Agrawal while (!qbman_check_command_complete( 1987a3a997f0SHemant Agrawal get_swp_active_dqs( 1988a3a997f0SHemant Agrawal DPAA2_PER_LCORE_ETHRX_DPIO->index))) 1989a3a997f0SHemant Agrawal ; 1990a3a997f0SHemant Agrawal clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); 1991a3a997f0SHemant Agrawal } 1992a3a997f0SHemant Agrawal while (1) { 1993a3a997f0SHemant Agrawal if (qbman_swp_pull(swp, &pulldesc)) { 1994a3a997f0SHemant Agrawal DPAA2_PMD_DP_DEBUG( 19952b843cacSDavid Marchand "VDQ command not issued. QBMAN busy"); 1996a3a997f0SHemant Agrawal /* Portal was busy, try again */ 1997a3a997f0SHemant Agrawal continue; 1998a3a997f0SHemant Agrawal } 1999a3a997f0SHemant Agrawal break; 2000a3a997f0SHemant Agrawal } 2001a3a997f0SHemant Agrawal q_storage->active_dqs = dq_storage; 2002a3a997f0SHemant Agrawal q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; 2003a3a997f0SHemant Agrawal set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, 2004a3a997f0SHemant Agrawal dq_storage); 2005a3a997f0SHemant Agrawal } 2006a3a997f0SHemant Agrawal 2007a3a997f0SHemant Agrawal dq_storage = q_storage->active_dqs; 2008a3a997f0SHemant Agrawal rte_prefetch0((void *)(size_t)(dq_storage)); 2009a3a997f0SHemant Agrawal rte_prefetch0((void *)(size_t)(dq_storage + 1)); 2010a3a997f0SHemant Agrawal 2011a3a997f0SHemant Agrawal /* Prepare next pull descriptor. This will give space for the 20127be78d02SJosh Soref * prefetching done on DQRR entries 2013a3a997f0SHemant Agrawal */ 2014a3a997f0SHemant Agrawal q_storage->toggle ^= 1; 2015a3a997f0SHemant Agrawal dq_storage1 = q_storage->dq_storage[q_storage->toggle]; 2016a3a997f0SHemant Agrawal qbman_pull_desc_clear(&pulldesc); 2017a3a997f0SHemant Agrawal qbman_pull_desc_set_numframes(&pulldesc, pull_size); 2018a3a997f0SHemant Agrawal qbman_pull_desc_set_fq(&pulldesc, fqid); 2019a3a997f0SHemant Agrawal qbman_pull_desc_set_storage(&pulldesc, dq_storage1, 2020a3a997f0SHemant Agrawal (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); 2021a3a997f0SHemant Agrawal 2022a3a997f0SHemant Agrawal /*Prepare enqueue descriptor*/ 2023a3a997f0SHemant Agrawal qbman_eq_desc_clear(&eqdesc); 2024a3a997f0SHemant Agrawal qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); 2025a3a997f0SHemant Agrawal qbman_eq_desc_set_response(&eqdesc, 0, 0); 2026a3a997f0SHemant Agrawal qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid); 2027a3a997f0SHemant Agrawal 2028a3a997f0SHemant Agrawal /* Check if the previously issued command is completed. 2029a3a997f0SHemant Agrawal * The SWP also seems to be shared between the Ethernet driver 2030a3a997f0SHemant Agrawal * and the SEC driver.
2031a3a997f0SHemant Agrawal */ 2032a3a997f0SHemant Agrawal while (!qbman_check_command_complete(dq_storage)) 2033a3a997f0SHemant Agrawal ; 2034a3a997f0SHemant Agrawal if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) 2035a3a997f0SHemant Agrawal clear_swp_active_dqs(q_storage->active_dpio_id); 2036a3a997f0SHemant Agrawal 2037a3a997f0SHemant Agrawal pending = 1; 2038a3a997f0SHemant Agrawal 2039a3a997f0SHemant Agrawal do { 2040a3a997f0SHemant Agrawal /* Loop until the dq_storage is updated with 2041a3a997f0SHemant Agrawal * a new token by QBMAN 2042a3a997f0SHemant Agrawal */ 2043a3a997f0SHemant Agrawal while (!qbman_check_new_result(dq_storage)) 2044a3a997f0SHemant Agrawal ; 2045a3a997f0SHemant Agrawal rte_prefetch0((void *)((size_t)(dq_storage + 2))); 2046a3a997f0SHemant Agrawal /* Check whether the last pull command has expired and 2047a3a997f0SHemant Agrawal * set the condition for loop termination 2048a3a997f0SHemant Agrawal */ 2049a3a997f0SHemant Agrawal if (qbman_result_DQ_is_pull_complete(dq_storage)) { 2050a3a997f0SHemant Agrawal pending = 0; 2051a3a997f0SHemant Agrawal /* Check for valid frame. */ 2052a3a997f0SHemant Agrawal status = qbman_result_DQ_flags(dq_storage); 2053a3a997f0SHemant Agrawal if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) 2054a3a997f0SHemant Agrawal continue; 2055a3a997f0SHemant Agrawal } 2056a3a997f0SHemant Agrawal fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage); 2057a3a997f0SHemant Agrawal 2058a3a997f0SHemant Agrawal dq_storage++; 2059a3a997f0SHemant Agrawal num_rx++; 2060a3a997f0SHemant Agrawal } while (pending); 2061a3a997f0SHemant Agrawal 2062a3a997f0SHemant Agrawal while (num_tx < num_rx) { 2063a3a997f0SHemant Agrawal num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc, 2064a3a997f0SHemant Agrawal &fd[num_tx], 0, num_rx - num_tx); 2065a3a997f0SHemant Agrawal } 2066a3a997f0SHemant Agrawal 2067a3a997f0SHemant Agrawal if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { 2068a3a997f0SHemant Agrawal while (!qbman_check_command_complete( 2069a3a997f0SHemant Agrawal get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) 2070a3a997f0SHemant Agrawal ; 2071a3a997f0SHemant Agrawal clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); 2072a3a997f0SHemant Agrawal } 2073a3a997f0SHemant Agrawal /* issue a volatile dequeue command for next pull */ 2074a3a997f0SHemant Agrawal while (1) { 2075a3a997f0SHemant Agrawal if (qbman_swp_pull(swp, &pulldesc)) { 2076a3a997f0SHemant Agrawal DPAA2_PMD_DP_DEBUG("VDQ command is not issued. " 20772b843cacSDavid Marchand "QBMAN is busy (2)"); 2078a3a997f0SHemant Agrawal continue; 2079a3a997f0SHemant Agrawal } 2080a3a997f0SHemant Agrawal break; 2081a3a997f0SHemant Agrawal } 2082a3a997f0SHemant Agrawal q_storage->active_dqs = dq_storage1; 2083a3a997f0SHemant Agrawal q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; 2084a3a997f0SHemant Agrawal set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); 2085a3a997f0SHemant Agrawal 2086a3a997f0SHemant Agrawal dpaa2_q->rx_pkts += num_rx; 2087a3a997f0SHemant Agrawal dpaa2_q->tx_pkts += num_tx; 2088a3a997f0SHemant Agrawal 2089a3a997f0SHemant Agrawal return 0; 2090a3a997f0SHemant Agrawal } 2091a3a997f0SHemant Agrawal #if defined(RTE_TOOLCHAIN_GCC) 2092a3a997f0SHemant Agrawal #pragma GCC diagnostic pop 2093a3a997f0SHemant Agrawal #elif defined(RTE_TOOLCHAIN_CLANG) 2094a3a997f0SHemant Agrawal #pragma clang diagnostic pop 2095a3a997f0SHemant Agrawal #endif 2096
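/*
 * Minimal usage sketch (not part of the driver): the burst routines in this
 * file are installed as ethdev Rx/Tx callbacks, so an application reaches
 * them through the generic burst API. The guard macro and the port/queue
 * ids below are illustrative assumptions only.
 */
#ifdef DPAA2_RXTX_USAGE_SKETCH
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
dpaa2_example_echo_burst(uint16_t port_id, uint16_t queue_id,
			 struct rte_mbuf **pkts, uint16_t burst)
{
	/* Rx dispatches to the dpaa2 Rx callback; Tx dispatches to
	 * dpaa2_dev_tx() (or dpaa2_dev_tx_ordered() when ordered
	 * queues are configured) for a dpaa2 port.
	 */
	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, burst);
	uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
	uint16_t i;

	/* Frames the portal did not accept remain owned by the caller. */
	for (i = nb_tx; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
	return nb_tx;
}
#endif /* DPAA2_RXTX_USAGE_SKETCH */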