/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__

#include <rte_ethdev_driver.h>

#define OFFLOAD_FLAGS				\
	uint16_t rx_offload_flags;		\
	uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

#define OCCTX_RX_OFFLOAD_NONE		(0)
#define OCCTX_RX_MULTI_SEG_F		BIT(0)
#define OCCTX_RX_OFFLOAD_CSUM_F		BIT(1)
#define OCCTX_RX_VLAN_FLTR_F		BIT(2)

#define OCCTX_TX_OFFLOAD_NONE			(0)
#define OCCTX_TX_MULTI_SEG_F			BIT(0)
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F		BIT(1)
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F		BIT(2)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F		BIT(3)
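
/*
 * Illustrative sketch (not part of the driver): the per-queue
 * rx/tx_offload_flags declared through OFFLOAD_FLAGS are bitmasks built
 * from the OCCTX_* bits above. A queue-setup helper could derive the Tx
 * mask from the ethdev offload configuration roughly as below; the helper
 * name and the exact DEV_TX_OFFLOAD_* selection are assumptions, not the
 * driver's actual mapping:
 *
 *	static uint16_t
 *	occtx_tx_offload_flags_example(uint64_t txmode_offloads)
 *	{
 *		uint16_t flags = OCCTX_TX_OFFLOAD_NONE;
 *
 *		if (txmode_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
 *			flags |= OCCTX_TX_MULTI_SEG_F;
 *		if (txmode_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
 *				       DEV_TX_OFFLOAD_TCP_CKSUM |
 *				       DEV_TX_OFFLOAD_UDP_CKSUM |
 *				       DEV_TX_OFFLOAD_SCTP_CKSUM))
 *			flags |= OCCTX_TX_OFFLOAD_L3_L4_CSUM_F;
 *		if (txmode_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
 *			flags |= OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F;
 *		if (!(txmode_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
 *			flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F;
 *
 *		return flags;
 *	}
 */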

/* Packet type table */
#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST

/* octeontx send header sub descriptor structure */
RTE_STD_C11
union octeontx_send_hdr_w0_u {
	uint64_t u;
	struct {
		uint64_t total   : 16;
		uint64_t markptr : 8;
		uint64_t l3ptr   : 8;
		uint64_t l4ptr   : 8;
		uint64_t ii      : 1;
		uint64_t shp_dis : 1;
		uint64_t ckle    : 1;
		uint64_t cklf    : 2;
		uint64_t ckl3    : 1;
		uint64_t ckl4    : 2;
		uint64_t p       : 1;
		uint64_t format  : 7;
		uint64_t tstamp  : 1;
		uint64_t tso_eom : 1;
		uint64_t df      : 1;
		uint64_t tso     : 1;
		uint64_t n2      : 1;
		uint64_t scntn1  : 3;
	};
};

RTE_STD_C11
union octeontx_send_hdr_w1_u {
	uint64_t u;
	struct {
		uint64_t tso_mss : 14;
		uint64_t shp_ra  : 2;
		uint64_t tso_sb  : 8;
		uint64_t leptr   : 8;
		uint64_t lfptr   : 8;
		uint64_t shp_chg : 9;
		uint64_t tso_fn  : 7;
		uint64_t l2len   : 8;
	};
};

struct octeontx_send_hdr_s {
	union octeontx_send_hdr_w0_u w0;
	union octeontx_send_hdr_w1_u w1;
};

static const uint32_t __rte_cache_aligned
ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
};
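
/*
 * Illustrative use of the table above (the variable names are assumptions,
 * not the driver's actual Rx code): the PKI parser classifies each packet
 * into LC/LE/LF layer-type codes, and the Rx path can translate that triple
 * into an mbuf packet type with a single lookup, e.g.:
 *
 *	mbuf->packet_type = ptype_table[lcty][lety][lfty];
 */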

static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}
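
/*
 * Helper for the OCCTX_TX_OFFLOAD_MBUF_NOFF_F path below: returns 0 when
 * the segment's buffer may be released by the PKO hardware after
 * transmission (sole reference, detached first if it was indirect) and 1
 * when other references remain and the buffer must not be freed. The
 * result is shifted into SEND_HDR[DF] (single segment) or SG_DESC[I]
 * (gather entry) by the prepare routines.
 */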

static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf refcount is greater than 1, so the buffer must not be freed */
	return 1;
}
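
/*
 * Note on the checksum-type arithmetic used below: PKT_TX_L4_MASK keeps
 * the L4 checksum request in bits 52-53 of ol_flags (DPDK encodes
 * 1 = TCP, 2 = SCTP, 3 = UDP), so each term of the form
 * !(((ol_flags ^ PKT_TX_<X>_CKSUM) >> 52) & 0x3) is 1 only when exactly
 * that request is set. Worked example, assuming PKT_TX_TCP_CKSUM is set:
 *
 *	((ol_flags ^ PKT_TX_UDP_CKSUM)  >> 52) & 0x3 != 0  ->  term = 0
 *	((ol_flags ^ PKT_TX_TCP_CKSUM)  >> 52) & 0x3 == 0  ->  term = 1 * 2
 *	((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3 != 0  ->  term = 0
 *
 * giving csum = 2, the PKO "TCP L4 checksum" algorithm code.
 */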

static __rte_always_inline void
octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
			     struct rte_mbuf *m)
{
	struct octeontx_send_hdr_s *send_hdr =
				(struct octeontx_send_hdr_s *)cmd_buf;
	uint64_t ol_flags = m->ol_flags;

	/* PKO Checksum L4 Algorithm Enumeration
	 * 0x0 - No checksum
	 * 0x1 - UDP L4 checksum
	 * 0x2 - TCP L4 checksum
	 * 0x3 - SCTP L4 checksum
	 */
	const uint8_t csum = (!(((ol_flags ^ PKT_TX_UDP_CKSUM) >> 52) & 0x3) +
		      (!(((ol_flags ^ PKT_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
		      (!(((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));

	const uint8_t is_tunnel_parsed = (!!(ol_flags & PKT_TX_TUNNEL_GTP) ||
				      !!(ol_flags & PKT_TX_TUNNEL_VXLAN_GPE) ||
				      !!(ol_flags & PKT_TX_TUNNEL_VXLAN) ||
				      !!(ol_flags & PKT_TX_TUNNEL_GRE) ||
				      !!(ol_flags & PKT_TX_TUNNEL_GENEVE) ||
				      !!(ol_flags & PKT_TX_TUNNEL_IP) ||
				      !!(ol_flags & PKT_TX_TUNNEL_IPIP));

	const uint8_t csum_outer = (!!(ol_flags & PKT_TX_OUTER_UDP_CKSUM) ||
				    !!(ol_flags & PKT_TX_TUNNEL_UDP));
	const uint8_t outer_l2_len = m->outer_l2_len;
	const uint8_t l2_len = m->l2_len;

	if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
	    (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
		if (is_tunnel_parsed) {
			/* Outer L3 */
			send_hdr->w0.l3ptr = outer_l2_len;
			send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
			/* Set ckl3 for PKO to calculate IPV4 header checksum */
			send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);

			/* Outer L4 */
			send_hdr->w0.ckl4 = csum_outer;

			/* Inner L3 */
			send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
			send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
			/* Set ckle for PKO to calculate inner IPV4 header
			 * checksum.
			 */
			send_hdr->w0.ckle = !!(ol_flags & PKT_TX_IPV4);

			/* Inner L4 */
			send_hdr->w0.cklf = csum;
		} else {
			/* Inner L3 */
			send_hdr->w0.l3ptr = l2_len;
			send_hdr->w0.l4ptr = l2_len + m->l3_len;
			/* Set ckl3 for PKO to calculate IPV4 header checksum */
			send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);

			/* Inner L4 */
			send_hdr->w0.ckl4 = csum;
		}
	} else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
		/* Outer L3 */
		send_hdr->w0.l3ptr = outer_l2_len;
		send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
		/* Set ckl3 for PKO to calculate IPV4 header checksum */
		send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);

		/* Outer L4 */
		send_hdr->w0.ckl4 = csum_outer;
	} else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
		/* Inner L3 */
		send_hdr->w0.l3ptr = l2_len;
		send_hdr->w0.l4ptr = l2_len + m->l3_len;
		/* Set ckl3 for PKO to calculate IPV4 header checksum */
		send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);

		/* Inner L4 */
		send_hdr->w0.ckl4 = csum;
	}
}
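
/*
 * Single-segment Tx descriptor builder: emits PKO_SEND_HDR_S (two 64-bit
 * words, optionally patched by octeontx_tx_checksum_offload()) followed by
 * PKO_SEND_BUFLINK_S (two more words), i.e. four command words in total,
 * which lines up with the command buffer size of 4 used by the
 * non-multi-seg modes in OCCTX_TX_FASTPATH_MODES below. Bit 58 of word 0
 * is SEND_HDR[DF] ("don't free"), driven by octeontx_prefree_seg() when
 * OCCTX_TX_OFFLOAD_MBUF_NOFF_F is enabled.
 */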

static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			const uint16_t flag)
{
	uint16_t gaura_id, nb_desc = 0;

	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	/* Enable tx checksum offload */
	if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
	    (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
		octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

	/* SEND_HDR[DF] bit controls if buffer is to be freed or
	 * not, as SG_DESC[I] and SEND_HDR[II] are clear.
	 */
	if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
		cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) << 58);

	/* Mark mempool object as "put" since it is freed by PKO */
	if (!(cmd_buf[0] & (1ULL << 58)))
		__mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
					1, 0);
	/* Get the gaura Id */
	gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
					      tx_pkt->pool->pool_id);

	/* Setup PKO_SEND_BUFLINK_S */
	cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
			     PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
			     PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
			     tx_pkt->data_len;
	cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

	return nb_desc;
}
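
/*
 * Multi-segment variant: emits PKO_SEND_HDR_S (two words) plus one
 * PKO_SEND_GATHER_S word and one IOVA word per segment, i.e.
 * 2 + 2 * nb_segs command words. Bit 57 of each gather word is SG_DESC[I],
 * the per-segment "don't free" control. The command buffer size of 14 used
 * by the *_mseg modes below would therefore cover up to six gather
 * segments (an inference from the sizes in OCCTX_TX_FASTPATH_MODES, not a
 * stated hardware limit).
 */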

static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			     const uint16_t flag)
{
	uint16_t nb_segs, nb_desc = 0;
	uint16_t gaura_id, len = 0;
	struct rte_mbuf *m_next = NULL;

	nb_segs = tx_pkt->nb_segs;
	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	/* Enable tx checksum offload */
	if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
	    (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
		octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

	do {
		m_next = tx_pkt->next;
		/* Handle the case where segments belong to different pools,
		 * e.g. due to fragmentation.
		 */
		gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
						      tx_pkt->pool->pool_id);

		/* Setup PKO_SEND_GATHER_S */
		cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC |
				   PKO_SEND_GATHER_LDTYPE(0x1ull) |
				   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
				   tx_pkt->data_len;

		/* SG_DESC[I] bit controls if buffer is to be freed or
		 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
		 */
		if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
			cmd_buf[nb_desc] |=
				(octeontx_prefree_seg(tx_pkt) << 57);
		}

		/* Mark mempool object as "put" since it is freed by
		 * PKO.
		 */
		if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
			tx_pkt->next = NULL;
			__mempool_check_cookies(tx_pkt->pool,
						(void **)&tx_pkt, 1, 0);
		}
		nb_desc++;

		cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);

		nb_segs--;
		len += tx_pkt->data_len;
		tx_pkt = m_next;
	} while (nb_segs);

	return nb_desc;
}

static __rte_always_inline uint16_t
__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts, uint64_t *cmd_buf,
		     const uint16_t flags)
{
	struct octeontx_txq *txq = tx_queue;
	octeontx_dq_t *dq = &txq->dq;
	uint16_t count = 0, nb_desc;

	rte_cio_wmb();

	while (count < nb_pkts) {
		if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
			break;

		if (flags & OCCTX_TX_MULTI_SEG_F) {
			nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
							       cmd_buf, flags);
		} else {
			nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
							  cmd_buf, flags);
		}

		octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
				   nb_desc);

		count++;
	}
	return count;
}

uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#define L3L4CSUM_F	OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F	OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F		OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F		OCCTX_TX_MULTI_SEG_F

/* [NOFF] [OL3OL4CSUM_F] [L3L4CSUM_F] [MULTI_SEG] */
#define OCCTX_TX_FASTPATH_MODES					\
T(no_offload,			0, 0, 0, 0,	4,		\
				OCCTX_TX_OFFLOAD_NONE)		\
T(mseg,				0, 0, 0, 1,	14,		\
				MULT_F)				\
T(l3l4csum,			0, 0, 1, 0,	4,		\
				L3L4CSUM_F)			\
T(l3l4csum_mseg,		0, 0, 1, 1,	14,		\
				L3L4CSUM_F | MULT_F)		\
T(ol3ol4csum,			0, 1, 0, 0,	4,		\
				OL3OL4CSUM_F)			\
T(ol3l4csum_mseg,		0, 1, 0, 1,	14,		\
				OL3OL4CSUM_F | MULT_F)		\
T(ol3l4csum_l3l4csum,		0, 1, 1, 0,	4,		\
				OL3OL4CSUM_F | L3L4CSUM_F)	\
T(ol3l4csum_l3l4csum_mseg,	0, 1, 1, 1,	14,		\
				OL3OL4CSUM_F | L3L4CSUM_F | MULT_F) \
T(noff,				1, 0, 0, 0,	4,		\
				NOFF_F)				\
T(noff_mseg,			1, 0, 0, 1,	14,		\
				NOFF_F | MULT_F)		\
T(noff_l3l4csum,		1, 0, 1, 0,	4,		\
				NOFF_F | L3L4CSUM_F)		\
T(noff_l3l4csum_mseg,		1, 0, 1, 1,	14,		\
				NOFF_F | L3L4CSUM_F | MULT_F)	\
T(noff_ol3ol4csum,		1, 1, 0, 0,	4,		\
				NOFF_F | OL3OL4CSUM_F)		\
T(noff_ol3ol4csum_mseg,		1, 1, 0, 1,	14,		\
				NOFF_F | OL3OL4CSUM_F | MULT_F)	\
T(noff_ol3ol4csum_l3l4csum,	1, 1, 1, 0,	4,		\
				NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
T(noff_ol3ol4csum_l3l4csum_mseg, 1, 1, 1, 1,	14,		\
				NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F | \
				MULT_F)
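
/*
 * Illustrative expansion (an assumption about how octeontx_rxtx.c consumes
 * the table, shown only to explain its layout): each T() row is expected
 * to expand to a dedicated transmit function whose compile-time flags
 * select the code paths in __octeontx_xmit_pkts(), with the size column
 * dimensioning the on-stack command buffer, e.g.:
 *
 *	#define T(name, f3, f2, f1, f0, sz, flags)
 *	static uint16_t
 *	octeontx_xmit_pkts_##name(void *tx_queue, struct rte_mbuf **tx_pkts,
 *				  uint16_t nb_pkts)
 *	{
 *		uint64_t cmd[(sz)];
 *
 *		return __octeontx_xmit_pkts(tx_queue, tx_pkts, nb_pkts,
 *					    cmd, (flags));
 *	}
 *
 *	OCCTX_TX_FASTPATH_MODES
 *	#undef T
 *
 * (line-continuation backslashes omitted for readability). The Rx table
 * below would be consumed the same way through an R() macro.
 */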

/* RX offload macros */
#define VLAN_FLTR_F	OCCTX_RX_VLAN_FLTR_F
#define CSUM_F		OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F	OCCTX_RX_MULTI_SEG_F

/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES						\
R(no_offload,		0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)		\
R(mseg,			0, 0, 1,  MULT_RX_F)				\
R(csum,			0, 1, 0,  CSUM_F)				\
R(csum_mseg,		0, 1, 1,  CSUM_F | MULT_RX_F)			\
R(vlan,			1, 0, 0,  VLAN_FLTR_F)				\
R(vlan_mseg,		1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)		\
R(vlan_csum,		1, 1, 0,  VLAN_FLTR_F | CSUM_F)			\
R(vlan_csum_mseg,	1, 1, 1,  CSUM_F | VLAN_FLTR_F | MULT_RX_F)

#endif /* __OCTEONTX_RXTX_H__ */