/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */


#ifndef _QEDE_RXTX_H_
#define _QEDE_RXTX_H_

#include "qede_ethdev.h"

/* Ring Descriptors */
#define RX_RING_SIZE_POW        16	/* 64K */
#define RX_RING_SIZE            (1ULL << RX_RING_SIZE_POW)
#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN          128
#define NUM_RX_BDS_DEF          NUM_RX_BDS_MAX
#define NUM_RX_BDS(q)           (q->nb_rx_desc - 1)

#define TX_RING_SIZE_POW        16	/* 64K */
#define TX_RING_SIZE            (1ULL << TX_RING_SIZE_POW)
#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN          128
#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX
#define NUM_TX_BDS(q)           (q->nb_tx_desc - 1)

#define TX_CONS(txq)            (txq->sw_tx_cons & NUM_TX_BDS(txq))
#define TX_PROD(txq)            (txq->sw_tx_prod & NUM_TX_BDS(txq))
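
/* Illustration (not part of the driver): sw_tx_cons/sw_tx_prod are
 * free-running 16-bit counters, and NUM_TX_BDS() is a mask that folds
 * them into the ring. The AND can replace a modulo only because the
 * descriptor count is a power of two. For example, with
 * nb_tx_desc = 512, NUM_TX_BDS(txq) = 511 (0x1ff):
 *
 *     sw_tx_prod = 513  ->  TX_PROD(txq) = 513 & 511 = 1
 */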

#define QEDE_DEFAULT_TX_FREE_THRESH	32

#define QEDE_CSUM_ERROR			(1 << 0)
#define QEDE_CSUM_UNNECESSARY		(1 << 1)
#define QEDE_TUNN_CSUM_UNNECESSARY	(1 << 2)

#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
	do { \
		(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
		(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
		(bd)->nbytes = rte_cpu_to_le_16(len); \
	} while (0)
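
/* Usage sketch (illustrative; bd1 and mbuf are assumed locals in a Tx
 * burst routine): the 64-bit DMA address of an mbuf is split into
 * little-endian hi/lo halves as the hardware BD layout requires:
 *
 *     struct eth_tx_1st_bd *bd1 = ...;
 *     struct rte_mbuf *mbuf = ...;
 *
 *     QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
 *                          mbuf->data_len);
 */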

#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))

#define CQE_HAS_OUTER_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))

#define QEDE_MIN_RX_BUFF_SIZE		(1024)
#define QEDE_VLAN_TAG_SIZE		(4)
#define QEDE_LLC_SNAP_HDR_LEN		(8)

/* Max supported alignment is 256 (shift 8);
 * the minimal alignment shift of 6 is optimal for 57xxx HW performance.
 */
#define QEDE_L1_CACHE_SHIFT	6
#define QEDE_RX_ALIGN_SHIFT	(RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END	(1UL << QEDE_RX_ALIGN_SHIFT)
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
					~(QEDE_FW_RX_ALIGN_END - 1))
#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
							 QEDE_FW_RX_ALIGN_END)
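
/* Worked example (illustrative): with QEDE_L1_CACHE_SHIFT = 6,
 * QEDE_RX_ALIGN_SHIFT is 6 and QEDE_FW_RX_ALIGN_END is 64, so
 *
 *     QEDE_CEIL_TO_CACHE_LINE_SIZE(100)  = (100 + 63) & ~63 = 128
 *     QEDE_FLOOR_TO_CACHE_LINE_SIZE(100) = 100 & ~63        = 64
 *
 * i.e. sizes are rounded up/down to a multiple of the alignment.
 */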

/* Note: QEDE_LLC_SNAP_HDR_LEN is optional;
 * the +2 is for padding in front of the L2 header.
 */
#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) \
				 + (QEDE_LLC_SNAP_HDR_LEN) + 2)

#define QEDE_MAX_ETHER_HDR_LEN	(RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_ETH_MAX_LEN	(RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
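
/* Worked example (illustrative): QEDE_ETH_OVERHEAD = 2 * 4 + 8 + 2 = 18
 * bytes, so QEDE_MAX_ETHER_HDR_LEN = 14 + 18 = 32 and, with the
 * standard 1500-byte MTU, QEDE_ETH_MAX_LEN = 1500 + 32 = 1532 bytes.
 */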

#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4			|\
				 RTE_ETH_RSS_NONFRAG_IPV4_TCP	|\
				 RTE_ETH_RSS_NONFRAG_IPV4_UDP	|\
				 RTE_ETH_RSS_IPV6			|\
				 RTE_ETH_RSS_NONFRAG_IPV6_TCP	|\
				 RTE_ETH_RSS_NONFRAG_IPV6_UDP	|\
				 RTE_ETH_RSS_VXLAN			|\
				 RTE_ETH_RSS_GENEVE)

#define QEDE_RXTX_MAX(qdev) \
	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))

/* Macros for the non-tunnel packet-type lookup table */
#define QEDE_PKT_TYPE_UNKNOWN				0x0
#define QEDE_PKT_TYPE_MAX				0x3f

#define QEDE_PKT_TYPE_IPV4				0x1
#define QEDE_PKT_TYPE_IPV6				0x2
#define QEDE_PKT_TYPE_IPV4_TCP				0x5
#define QEDE_PKT_TYPE_IPV6_TCP				0x6
#define QEDE_PKT_TYPE_IPV4_UDP				0x9
#define QEDE_PKT_TYPE_IPV6_UDP				0xa

/* For fragmented packets, the corresponding IP bit is set as well */
#define QEDE_PKT_TYPE_IPV4_FRAG				0x11
#define QEDE_PKT_TYPE_IPV6_FRAG				0x12

#define QEDE_PKT_TYPE_IPV4_VLAN				0x21
#define QEDE_PKT_TYPE_IPV6_VLAN				0x22
#define QEDE_PKT_TYPE_IPV4_TCP_VLAN			0x25
#define QEDE_PKT_TYPE_IPV6_TCP_VLAN			0x26
#define QEDE_PKT_TYPE_IPV4_UDP_VLAN			0x29
#define QEDE_PKT_TYPE_IPV6_UDP_VLAN			0x2a

#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG			0x31
#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG			0x32
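
/* Reading the values above as a 6-bit field (hence QEDE_PKT_TYPE_MAX =
 * 0x3f): bit 0 = IPv4, bit 1 = IPv6, bit 2 = TCP, bit 3 = UDP,
 * bit 4 = fragment, bit 5 = VLAN. For example,
 * QEDE_PKT_TYPE_IPV4_UDP_VLAN = 0x29 = VLAN(0x20) | UDP(0x8) |
 * IPv4(0x1).
 */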

/* Macros for the tunneled-packet next-protocol lookup table */
#define QEDE_PKT_TYPE_TUNN_GENEVE			0x1
#define QEDE_PKT_TYPE_TUNN_GRE				0x2
#define QEDE_PKT_TYPE_TUNN_VXLAN			0x3

/* Bit 2 is a don't-care bit */
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE	0x9
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE		0xa
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN	0xb

#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE	0xd
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE		0xe
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN		0xf

#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE    0x11
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE       0x12
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN     0x13

#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE	0x15
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE		0x16
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN	0x17

#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE    0x19
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE       0x1a
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN     0x1b

#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE      0x1d
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE		0x1e
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN       0x1f

#define QEDE_PKT_TYPE_TUNN_MAX_TYPE			0x20 /* 2^5 */
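
/* Decoding the 5-bit tunnel values above (an observation from the
 * constants, not a statement of the FW interface): bits 0-1 select the
 * tunnel protocol (1 = GENEVE, 2 = GRE, 3 = VXLAN), bit 2 is set in
 * the TENID_EXIST variants, and bits 3-4 encode the inner header
 * (01 = L2, 10 = IPv4, 11 = IPv6). E.g. 0x16 = 0b10110 = inner IPv4,
 * tenant ID present, GRE.
 */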

#define QEDE_TX_CSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM              | \
				   RTE_MBUF_F_TX_TCP_CKSUM             | \
				   RTE_MBUF_F_TX_UDP_CKSUM             | \
				   RTE_MBUF_F_TX_OUTER_IP_CKSUM        | \
				   RTE_MBUF_F_TX_TCP_SEG		| \
				   RTE_MBUF_F_TX_IPV4			| \
				   RTE_MBUF_F_TX_IPV6)

#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
			      RTE_MBUF_F_TX_VLAN		| \
			      RTE_MBUF_F_TX_TUNNEL_MASK)

#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
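
/* Usage sketch (illustrative, not the driver's exact code): a Tx
 * prepare routine can use the NOTSUP mask to reject mbufs requesting
 * offloads this PMD does not support:
 *
 *     if (mbuf->ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
 *             rte_errno = ENOTSUP;
 *             return i;   (number of packets validated so far)
 *     }
 */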

/* TPA related structures */
struct qede_agg_info {
	struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
	struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
};

/*
 * Structure associated with each RX queue.
 */
struct qede_rx_queue {
	/* Always keep qdev as first member */
	struct qede_dev *qdev;
	struct rte_mempool *mb_pool;
	struct ecore_chain rx_bd_ring;
	struct ecore_chain rx_comp_ring;
	uint16_t *hw_cons_ptr;
	void OSAL_IOMEM *hw_rxq_prod_addr;
	struct rte_mbuf **sw_rx_ring;
	struct ecore_sb_info *sb_info;
	uint16_t sw_rx_cons;
	uint16_t sw_rx_prod;
	uint16_t nb_rx_desc;
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t rx_buf_size;
	uint16_t rx_alloc_count;
	uint16_t unused;
	uint64_t rcv_pkts;
	uint64_t rx_segs;
	uint64_t rx_hw_errors;
	uint64_t rx_alloc_errors;
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
	void *handle;
};

union db_prod {
	struct eth_db_data data;
	uint32_t raw;
};

struct qede_tx_queue {
	/* Always keep qdev as first member */
	struct qede_dev *qdev;
	struct ecore_chain tx_pbl;
	struct rte_mbuf **sw_tx_ring;
	uint16_t nb_tx_desc;
	uint16_t nb_tx_avail;
	uint16_t tx_free_thresh;
	uint16_t queue_id;
	uint16_t *hw_cons_ptr;
	uint16_t sw_tx_cons;
	uint16_t sw_tx_prod;
	void OSAL_IOMEM *doorbell_addr;
	volatile union db_prod tx_db;
	uint16_t port_id;
	uint64_t xmit_pkts;
	bool is_legacy;
	void *handle;
};
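
/* Doorbell sketch (illustrative): db_prod overlays the structured
 * doorbell data with a raw 32-bit word, so after BDs are posted the
 * new producer can be pushed to the device in a single store, e.g.:
 *
 *     txq->tx_db.data.bd_prod =
 *             rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 *     rte_write32(txq->tx_db.raw, txq->doorbell_addr);
 */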

struct qede_fastpath {
	struct ecore_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txq;
};

/* This structure holds information about the fastpath queues
 * belonging to the individual engines in CMT mode.
 */
struct qede_fastpath_cmt {
	/* Always keep this as the first element */
	struct qede_dev *qdev;
	/* fastpath info of engine 0 */
	struct qede_fastpath *fp0;
	/* fastpath info of engine 1 */
	struct qede_fastpath *fp1;
};
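
/* Note (an assumption from the layout above, not a spec statement): on
 * dual-engine (CMT) devices each ethdev queue maps to one fastpath per
 * engine, and the *_cmt Rx/Tx handlers declared below are expected to
 * service both fp0 and fp1.
 */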

/*
 * RX/TX function prototypes
 */
int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			uint16_t nb_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp);

int qede_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf);

void qede_rx_queue_release(void *rx_queue);

void qede_tx_queue_release(void *tx_queue);

uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);

uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts);

uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t
qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts);

int qede_start_queues(struct rte_eth_dev *eth_dev);

void qede_stop_queues(struct rte_eth_dev *eth_dev);
int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
			  uint16_t max_frame_size);
int
qede_rx_descriptor_status(void *rxq, uint16_t offset);

/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);

#endif /* _QEDE_RXTX_H_ */