/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef	__OCTEONTX_RXTX_H__
#define	__OCTEONTX_RXTX_H__

#include <ethdev_driver.h>

#define OFFLOAD_FLAGS					\
	uint16_t rx_offload_flags;			\
	uint16_t tx_offload_flags

#define BIT(nr) (1UL << (nr))

#define OCCTX_RX_OFFLOAD_NONE		(0)
#define OCCTX_RX_MULTI_SEG_F		BIT(0)
#define OCCTX_RX_OFFLOAD_CSUM_F		BIT(1)
#define OCCTX_RX_VLAN_FLTR_F		BIT(2)

#define OCCTX_TX_OFFLOAD_NONE		(0)
#define OCCTX_TX_MULTI_SEG_F		BIT(0)
#define OCCTX_TX_OFFLOAD_L3_L4_CSUM_F	BIT(1)
#define OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F	BIT(2)
#define OCCTX_TX_OFFLOAD_MBUF_NOFF_F	BIT(3)

/* Packet type table */
#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST

/* octeontx send header sub descriptor structure */
union octeontx_send_hdr_w0_u {
	uint64_t u;
	struct {
		uint64_t total   : 16;
		uint64_t markptr : 8;
		uint64_t l3ptr   : 8;
		uint64_t l4ptr   : 8;
		uint64_t ii	 : 1;
		uint64_t shp_dis : 1;
		uint64_t ckle    : 1;
		uint64_t cklf    : 2;
		uint64_t ckl3    : 1;
		uint64_t ckl4    : 2;
		uint64_t p	 : 1;
		uint64_t format	 : 7;
		uint64_t tstamp  : 1;
		uint64_t tso_eom : 1;
		uint64_t df	 : 1;
		uint64_t tso	 : 1;
		uint64_t n2	 : 1;
		uint64_t scntn1	 : 3;
	};
};

union octeontx_send_hdr_w1_u {
	uint64_t u;
	struct {
		uint64_t tso_mss : 14;
		uint64_t shp_ra  : 2;
		uint64_t tso_sb  : 8;
		uint64_t leptr   : 8;
		uint64_t lfptr   : 8;
		uint64_t shp_chg : 9;
		uint64_t tso_fn  : 7;
		uint64_t l2len   : 8;
	};
};

struct octeontx_send_hdr_s {
	union octeontx_send_hdr_w0_u w0;
	union octeontx_send_hdr_w1_u w1;
};

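/*
 * Packet type lookup table, indexed by the PKI parser LC/LE/LF layer types
 * reported for a packet.  Each entry translates the hardware layer type
 * combination into the corresponding RTE_PTYPE_* value.
 */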
static const alignas(RTE_CACHE_LINE_SIZE) uint32_t
ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,

	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
};

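/*
 * Detach the indirect mbuf 'm' from its direct (parent) mbuf: restore m's
 * own buffer fields and return m to its pool.  The direct mbuf is reported
 * through *m_tofree so the caller can account for it.  Returns 0 when the
 * direct mbuf's refcount dropped to zero (its buffer may now be released,
 * e.g. by the PKO hardware), 1 otherwise.
 */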
static __rte_always_inline uint64_t
octeontx_pktmbuf_detach(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	/* The real data lives in the direct buffer; let the caller know */
	*m_tofree = md;
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now the indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}

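/*
 * Decide whether the hardware may free the buffer backing 'm' after
 * transmission.  For indirect segments, *m_tofree is updated to the direct
 * mbuf that actually owns the data.  Returns 0 if the buffer can be freed
 * by the PKO, 1 if it must be kept (its refcount is still held elsewhere).
 */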
static __rte_always_inline uint64_t
octeontx_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m, m_tofree);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return octeontx_pktmbuf_detach(m, m_tofree);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf refcount is greater than 1, so it must not be freed here */
	return 1;
}

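/*
 * Fill the checksum related fields of the PKO send header from the mbuf
 * Tx offload flags: the layer start offsets (l3ptr/l4ptr for the outer or
 * only headers, leptr/lfptr for inner headers of tunnelled packets) and the
 * checksum algorithm selectors (ckl3/ckl4 and ckle/cklf).
 */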
static __rte_always_inline void
octeontx_tx_checksum_offload(uint64_t *cmd_buf, const uint16_t flags,
			     struct rte_mbuf *m)
{
	struct octeontx_send_hdr_s *send_hdr =
				(struct octeontx_send_hdr_s *)cmd_buf;
	uint64_t ol_flags = m->ol_flags;

	/* PKO Checksum L4 Algorithm Enumeration
	 * 0x0 - No checksum
	 * 0x1 - UDP L4 checksum
	 * 0x2 - TCP L4 checksum
	 * 0x3 - SCTP L4 checksum
	 */
	const uint8_t csum = (!(((ol_flags ^ RTE_MBUF_F_TX_UDP_CKSUM) >> 52) & 0x3) +
		      (!(((ol_flags ^ RTE_MBUF_F_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
		      (!(((ol_flags ^ RTE_MBUF_F_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));

	const uint8_t is_tunnel_parsed = (!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP) ||
				      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) ||
				      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) ||
				      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE) ||
				      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE) ||
				      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IP) ||
				      !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP));

	const uint8_t csum_outer = (!!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
				    !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_UDP));
	const uint8_t outer_l2_len = m->outer_l2_len;
	const uint8_t l2_len = m->l2_len;

	if ((flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
	    (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)) {
		if (is_tunnel_parsed) {
			/* Outer L3 */
			send_hdr->w0.l3ptr = outer_l2_len;
			send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
			/* Set ckl3 for PKO to calculate the IPv4 header checksum */
			send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

			/* Outer L4 */
			send_hdr->w0.ckl4 = csum_outer;

			/* Inner L3 */
			send_hdr->w1.leptr = send_hdr->w0.l4ptr + l2_len;
			send_hdr->w1.lfptr = send_hdr->w1.leptr + m->l3_len;
			/* Set ckle for PKO to calculate the inner IPv4 header
			 * checksum.
			 */
			send_hdr->w0.ckle = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

			/* Inner L4 */
			send_hdr->w0.cklf = csum;
		} else {
			/* Inner L3 */
			send_hdr->w0.l3ptr = l2_len;
			send_hdr->w0.l4ptr = l2_len + m->l3_len;
			/* Set ckl3 for PKO to calculate the IPv4 header checksum */
			send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

			/* Inner L4 */
			send_hdr->w0.ckl4 = csum;
		}
	} else if (flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
		/* Outer L3 */
		send_hdr->w0.l3ptr = outer_l2_len;
		send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
		/* Set ckl3 for PKO to calculate the IPv4 header checksum */
		send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);

		/* Outer L4 */
		send_hdr->w0.ckl4 = csum_outer;
	} else if (flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F) {
		/* Inner L3 */
		send_hdr->w0.l3ptr = l2_len;
		send_hdr->w0.l4ptr = l2_len + m->l3_len;
		/* Set ckl3 for PKO to calculate the IPv4 header checksum */
		send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);

		/* Inner L4 */
		send_hdr->w0.ckl4 = csum;
	}
}

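/*
 * Build the PKO command for a single-segment packet: a PKO_SEND_HDR_S
 * sub descriptor followed by a PKO_SEND_BUFLINK_S pointing at the packet
 * data.  Returns the number of 64-bit command words written to cmd_buf.
 */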
static __rte_always_inline uint16_t
__octeontx_xmit_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			const uint16_t flag)
{
	uint16_t gaura_id, nb_desc = 0;
	struct rte_mbuf *m_tofree;
	rte_iova_t iova;
	uint16_t data_len;

	m_tofree = tx_pkt;

	data_len = tx_pkt->data_len;
	iova = rte_mbuf_data_iova(tx_pkt);

	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	/* Enable tx checksum offload */
	if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
	    (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
		octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

	/* SEND_HDR[DF] bit controls if buffer is to be freed or
	 * not, as SG_DESC[I] and SEND_HDR[II] are clear.
	 */
	if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
		cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt, &m_tofree) <<
			       58);

	/* Mark mempool object as "put" since it is freed by PKO */
	if (!(cmd_buf[0] & (1ULL << 58)))
		RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool, (void **)&m_tofree,
					1, 0);
	/* Get the gaura Id */
	gaura_id =
		octeontx_fpa_bufpool_gaura((uintptr_t)m_tofree->pool->pool_id);

	/* Setup PKO_SEND_BUFLINK_S */
	cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
		PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
		PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
		data_len;
	cmd_buf[nb_desc++] = iova;

	return nb_desc;
}

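/*
 * Build the PKO command for a multi-segment packet: a PKO_SEND_HDR_S
 * sub descriptor followed by one PKO_SEND_GATHER_S per segment.  Returns
 * the number of 64-bit command words written to cmd_buf.
 */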
static __rte_always_inline uint16_t
__octeontx_xmit_mseg_prepare(struct rte_mbuf *tx_pkt, uint64_t *cmd_buf,
			const uint16_t flag)
{
	uint16_t nb_segs, nb_desc = 0;
	uint16_t gaura_id;
	struct rte_mbuf *m_next = NULL, *m_tofree;
	rte_iova_t iova;
	uint16_t data_len;

	nb_segs = tx_pkt->nb_segs;
	/* Setup PKO_SEND_HDR_S */
	cmd_buf[nb_desc++] = tx_pkt->pkt_len & 0xffff;
	cmd_buf[nb_desc++] = 0x0;

	/* Enable tx checksum offload */
	if ((flag & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F) ||
	    (flag & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F))
		octeontx_tx_checksum_offload(cmd_buf, flag, tx_pkt);

	do {
		m_next = tx_pkt->next;
		/* Get TX parameters up front, as octeontx_prefree_seg
		 * might change them.
		 */
		m_tofree = tx_pkt;
		data_len = tx_pkt->data_len;
		iova = rte_mbuf_data_iova(tx_pkt);

		/* Setup PKO_SEND_GATHER_S */
		cmd_buf[nb_desc] = 0;

		/* SG_DESC[I] bit controls if buffer is to be freed or
		 * not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
		 */
		if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
			cmd_buf[nb_desc] |=
				(octeontx_prefree_seg(tx_pkt, &m_tofree) << 57);
		}

		/* Handle the case where segments belong to different pools,
		 * e.g. fragmented packets.
		 */
		gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)
					m_tofree->pool->pool_id);

		/* Setup PKO_SEND_GATHER_S */
		cmd_buf[nb_desc] |= PKO_SEND_GATHER_SUBDC		 |
				   PKO_SEND_GATHER_LDTYPE(0x1ull)	 |
				   PKO_SEND_GATHER_GAUAR((long)gaura_id) |
				   data_len;

		/* Mark mempool object as "put" since it is freed by
		 * PKO.
		 */
		if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
			tx_pkt->next = NULL;
			RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool,
						(void **)&m_tofree, 1, 0);
		}
		nb_desc++;

		cmd_buf[nb_desc++] = iova;

		nb_segs--;
		tx_pkt = m_next;
	} while (nb_segs);

	return nb_desc;
}

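/*
 * Common Tx burst body shared by the Tx fast-path variants.  For each packet
 * it checks the descriptor queue flow-control status, prepares the PKO
 * command (single or multi segment depending on 'flags') and submits it with
 * an LMTST store.  Returns the number of packets actually queued.
 */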
static __rte_always_inline uint16_t
__octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts, uint64_t *cmd_buf,
		     const uint16_t flags)
{
	struct octeontx_txq *txq = tx_queue;
	octeontx_dq_t *dq = &txq->dq;
	uint16_t count = 0, nb_desc;

	rte_io_wmb();

	while (count < nb_pkts) {
		if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
			break;

		if (flags & OCCTX_TX_MULTI_SEG_F) {
			nb_desc = __octeontx_xmit_mseg_prepare(tx_pkts[count],
							       cmd_buf, flags);
		} else {
			nb_desc = __octeontx_xmit_prepare(tx_pkts[count],
							  cmd_buf, flags);
		}

		octeontx_reg_lmtst(dq->lmtline_va, dq->ioreg_va, cmd_buf,
				   nb_desc);

		count++;
	}
	return count;
}

uint16_t
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

#define L3L4CSUM_F   OCCTX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define NOFF_F       OCCTX_TX_OFFLOAD_MBUF_NOFF_F
#define MULT_F       OCCTX_TX_MULTI_SEG_F

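/*
 * The OCCTX_TX_FASTPATH_MODES and OCCTX_RX_FASTPATH_MODES tables below are
 * X-macro lists: each T()/R() row names one combination of offload flags
 * (plus, for Tx, the command buffer size in 64-bit words).  The datapath
 * source re-defines T()/R() over these tables to stamp out one burst
 * function per row.  A minimal, illustrative expansion for a Tx row could
 * look roughly like the sketch below (the exact macro body lives in the
 * datapath C file and may differ):
 *
 *	#define T(name, f3, f2, f1, f0, sz, flags)
 *	static uint16_t
 *	octeontx_xmit_pkts_##name(void *tx_queue,
 *				  struct rte_mbuf **tx_pkts, uint16_t pkts)
 *	{
 *		uint64_t cmd[(sz)];
 *
 *		return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts,
 *					    cmd, (flags));
 *	}
 *
 *	OCCTX_TX_FASTPATH_MODES
 *	#undef T
 */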
/* [NOFF] [OL3OL4CSUM_F] [L3L4CSUM_F] [MULTI_SEG] */
#define OCCTX_TX_FASTPATH_MODES						       \
T(no_offload,				0, 0, 0, 0,	4,		       \
					OCCTX_TX_OFFLOAD_NONE)		       \
T(mseg,					0, 0, 0, 1,	14,		       \
					MULT_F)				       \
T(l3l4csum,				0, 0, 1, 0,	4,		       \
					L3L4CSUM_F)			       \
T(l3l4csum_mseg,			0, 0, 1, 1,	14,		       \
					L3L4CSUM_F | MULT_F)		       \
T(ol3ol4csum,				0, 1, 0, 0,	4,		       \
					OL3OL4CSUM_F)			       \
T(ol3l4csum_mseg,			0, 1, 0, 1,	14,		       \
					OL3OL4CSUM_F | MULT_F)		       \
T(ol3l4csum_l3l4csum,			0, 1, 1, 0,	4,		       \
					OL3OL4CSUM_F | L3L4CSUM_F)	       \
T(ol3l4csum_l3l4csum_mseg,		0, 1, 1, 1,	14,		       \
					OL3OL4CSUM_F | L3L4CSUM_F | MULT_F)    \
T(noff,					1, 0, 0, 0,	4,		       \
					NOFF_F)				       \
T(noff_mseg,				1, 0, 0, 1,	14,		       \
					NOFF_F | MULT_F)		       \
T(noff_l3l4csum,			1, 0, 1, 0,	4,		       \
					NOFF_F | L3L4CSUM_F)		       \
T(noff_l3l4csum_mseg,			1, 0, 1, 1,	14,		       \
					NOFF_F | L3L4CSUM_F | MULT_F)	       \
T(noff_ol3ol4csum,			1, 1, 0, 0,	4,		       \
					NOFF_F | OL3OL4CSUM_F)		       \
T(noff_ol3ol4csum_mseg,			1, 1, 0, 1,	14,		       \
					NOFF_F | OL3OL4CSUM_F | MULT_F)	       \
T(noff_ol3ol4csum_l3l4csum,		1, 1, 1, 0,	4,		       \
					NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)    \
T(noff_ol3ol4csum_l3l4csum_mseg,	1, 1, 1, 1,	14,		       \
					NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F |   \
					MULT_F)

/* RX offload macros */
#define VLAN_FLTR_F	OCCTX_RX_VLAN_FLTR_F
#define CSUM_F		OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F	OCCTX_RX_MULTI_SEG_F

/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES						       \
R(no_offload,				0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)       \
R(mseg,					0, 0, 1,  MULT_RX_F)		       \
R(csum,					0, 1, 0,  CSUM_F)		       \
R(csum_mseg,				0, 1, 1,  CSUM_F | MULT_RX_F)	       \
R(vlan,					1, 0, 0,  VLAN_FLTR_F)		       \
R(vlan_mseg,				1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)     \
R(vlan_csum,				1, 1, 0,  VLAN_FLTR_F | CSUM_F)	       \
R(vlan_csum_mseg,			1, 1, 1,  CSUM_F | VLAN_FLTR_F |       \
					MULT_RX_F)

#endif /* __OCTEONTX_RXTX_H__ */