xref: /dpdk/drivers/common/idpf/idpf_common_rxtx.h (revision e12a0166c80f65e35408f4715b2f3a60763c3741)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#ifndef _IDPF_COMMON_RXTX_H_
#define _IDPF_COMMON_RXTX_H_

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf_core.h>

#include "idpf_common_device.h"

#define IDPF_RX_MAX_BURST		32

#define IDPF_RX_OFFLOAD_IPV4_CKSUM		RTE_BIT64(1)
#define IDPF_RX_OFFLOAD_UDP_CKSUM		RTE_BIT64(2)
#define IDPF_RX_OFFLOAD_TCP_CKSUM		RTE_BIT64(3)
#define IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM	RTE_BIT64(6)
#define IDPF_RX_OFFLOAD_TIMESTAMP		RTE_BIT64(14)

#define IDPF_TX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
#define IDPF_TX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
#define IDPF_TX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
#define IDPF_TX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(4)
#define IDPF_TX_OFFLOAD_TCP_TSO          RTE_BIT64(5)
#define IDPF_TX_OFFLOAD_MULTI_SEGS       RTE_BIT64(15)
#define IDPF_TX_OFFLOAD_MBUF_FAST_FREE   RTE_BIT64(16)

#define IDPF_TX_MAX_MTU_SEG	10

#define IDPF_MIN_TSO_MSS	88
#define IDPF_MAX_TSO_MSS	9728
#define IDPF_MAX_TSO_FRAME_SIZE	262143

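/*
 * Illustrative sketch only (not part of this API): a Tx prepare path
 * would typically validate TSO requests against the limits above,
 * roughly as follows, where 'm' is the mbuf being checked and 'i' the
 * count of packets already accepted.
 *
 *	if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
 *	    (m->tso_segsz < IDPF_MIN_TSO_MSS ||
 *	     m->tso_segsz > IDPF_MAX_TSO_MSS ||
 *	     m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {
 *		rte_errno = EINVAL;
 *		return i;
 *	}
 */
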
#define IDPF_RLAN_CTX_DBUF_S	7
#define IDPF_RX_MAX_DATA_BUF_SIZE	(16 * 1024 - 128)

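/*
 * Illustrative sketch only (an assumption about queue setup, not a
 * definition from this header): the Rx data buffer length is commonly
 * derived from the mempool's data room, aligned down to the 128-byte
 * granularity implied by IDPF_RLAN_CTX_DBUF_S and capped at
 * IDPF_RX_MAX_DATA_BUF_SIZE.
 *
 *	uint16_t len = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 *
 *	len = RTE_ALIGN_FLOOR(len, 1 << IDPF_RLAN_CTX_DBUF_S);
 *	rxq->rx_buf_len = RTE_MIN(len, IDPF_RX_MAX_DATA_BUF_SIZE);
 */
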
#define IDPF_TX_CKSUM_OFFLOAD_MASK (		\
		RTE_MBUF_F_TX_IP_CKSUM |	\
		RTE_MBUF_F_TX_L4_MASK |		\
		RTE_MBUF_F_TX_TCP_SEG)

#define IDPF_TX_OFFLOAD_MASK (			\
		IDPF_TX_CKSUM_OFFLOAD_MASK |	\
		RTE_MBUF_F_TX_IPV4 |		\
		RTE_MBUF_F_TX_IPV6)

#define IDPF_TX_OFFLOAD_NOTSUP_MASK \
		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)

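/*
 * Illustrative sketch only (modelled on common DPDK prepare callbacks):
 * a tx_pkt_prepare implementation can reject any mbuf that requests an
 * offload outside the supported set by testing the NOTSUP mask built
 * above.
 *
 *	if (m->ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) {
 *		rte_errno = ENOTSUP;
 *		return i;
 *	}
 */
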
/* used for Vector PMD */
#define IDPF_VPMD_RX_MAX_BURST		32
#define IDPF_VPMD_TX_MAX_BURST		32
#define IDPF_VPMD_DESCS_PER_LOOP	4
#define IDPF_RXQ_REARM_THRESH		64
#define IDPD_TXQ_SCAN_CQ_THRESH	64
#define IDPF_TX_CTYPE_NUM	8

/* MTS */
#define GLTSYN_CMD_SYNC_0_0	(PF_TIMESYNC_BASE + 0x0)
#define PF_GLTSYN_SHTIME_0_0	(PF_TIMESYNC_BASE + 0x4)
#define PF_GLTSYN_SHTIME_L_0	(PF_TIMESYNC_BASE + 0x8)
#define PF_GLTSYN_SHTIME_H_0	(PF_TIMESYNC_BASE + 0xC)
#define GLTSYN_ART_L_0		(PF_TIMESYNC_BASE + 0x10)
#define GLTSYN_ART_H_0		(PF_TIMESYNC_BASE + 0x14)
#define PF_GLTSYN_SHTIME_0_1	(PF_TIMESYNC_BASE + 0x24)
#define PF_GLTSYN_SHTIME_L_1	(PF_TIMESYNC_BASE + 0x28)
#define PF_GLTSYN_SHTIME_H_1	(PF_TIMESYNC_BASE + 0x2C)
#define PF_GLTSYN_SHTIME_0_2	(PF_TIMESYNC_BASE + 0x44)
#define PF_GLTSYN_SHTIME_L_2	(PF_TIMESYNC_BASE + 0x48)
#define PF_GLTSYN_SHTIME_H_2	(PF_TIMESYNC_BASE + 0x4C)
#define PF_GLTSYN_SHTIME_0_3	(PF_TIMESYNC_BASE + 0x64)
#define PF_GLTSYN_SHTIME_L_3	(PF_TIMESYNC_BASE + 0x68)
#define PF_GLTSYN_SHTIME_H_3	(PF_TIMESYNC_BASE + 0x6C)

#define PF_TIMESYNC_BAR4_BASE	0x0E400000
#define GLTSYN_ENA		(PF_TIMESYNC_BAR4_BASE + 0x90)
#define GLTSYN_CMD		(PF_TIMESYNC_BAR4_BASE + 0x94)
#define GLTSYC_TIME_L		(PF_TIMESYNC_BAR4_BASE + 0x104)
#define GLTSYC_TIME_H		(PF_TIMESYNC_BAR4_BASE + 0x108)

#define GLTSYN_CMD_SYNC_0_4	(PF_TIMESYNC_BAR4_BASE + 0x110)
#define PF_GLTSYN_SHTIME_L_4	(PF_TIMESYNC_BAR4_BASE + 0x118)
#define PF_GLTSYN_SHTIME_H_4	(PF_TIMESYNC_BAR4_BASE + 0x11C)
#define GLTSYN_INCVAL_L		(PF_TIMESYNC_BAR4_BASE + 0x150)
#define GLTSYN_INCVAL_H		(PF_TIMESYNC_BAR4_BASE + 0x154)
#define GLTSYN_SHADJ_L		(PF_TIMESYNC_BAR4_BASE + 0x158)
#define GLTSYN_SHADJ_H		(PF_TIMESYNC_BAR4_BASE + 0x15C)

#define GLTSYN_CMD_SYNC_0_5	(PF_TIMESYNC_BAR4_BASE + 0x130)
#define PF_GLTSYN_SHTIME_L_5	(PF_TIMESYNC_BAR4_BASE + 0x138)
#define PF_GLTSYN_SHTIME_H_5	(PF_TIMESYNC_BAR4_BASE + 0x13C)

#define IDPF_RX_SPLIT_BUFQ1_ID	1
#define IDPF_RX_SPLIT_BUFQ2_ID	2

struct idpf_rx_stats {
	RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};

struct idpf_rx_queue {
	struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
	struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
	const struct rte_memzone *mz;   /* memzone for Rx ring */
	volatile void *rx_ring;
	struct rte_mbuf **sw_ring;      /* address of SW ring */
	uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */

	uint16_t nb_rx_desc;            /* ring length */
	uint16_t rx_tail;               /* current value of tail */
	volatile uint8_t *qrx_tail;     /* register address of tail */
	uint16_t rx_free_thresh;        /* max free RX desc to hold */
	uint16_t nb_rx_hold;            /* number of held free RX desc */
	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
	struct rte_mbuf fake_mbuf;      /* dummy mbuf */

	/* used for VPMD */
	uint16_t rxrearm_nb;       /* number of descs remaining to be re-armed */
	uint16_t rxrearm_start;    /* the idx we start the re-arming from */
	uint64_t mbuf_initializer; /* value to init mbufs */

	uint16_t rx_nb_avail;
	uint16_t rx_next_avail;

	uint16_t port_id;       /* device port ID */
	uint16_t queue_id;      /* Rx queue index */
	uint16_t rx_buf_len;    /* The packet buffer size */
	uint16_t rx_hdr_len;    /* The header buffer size */
	uint16_t max_pkt_len;   /* Maximum packet length */
	uint8_t rxdid;

	bool q_set;             /* if rx queue has been configured */
	bool q_started;         /* if rx queue has been started */
	bool rx_deferred_start; /* don't start this queue in dev start */
	const struct idpf_rxq_ops *ops;

	struct idpf_rx_stats rx_stats;

	/* only valid for split queue mode */
	uint8_t expected_gen_id;
	struct idpf_rx_queue *bufq1;
	struct idpf_rx_queue *bufq2;

	uint64_t offloads;
	uint32_t hw_register_set;
};

struct idpf_tx_entry {
	struct rte_mbuf *mbuf;
	uint16_t next_id;
	uint16_t last_id;
};

/* Structure associated with each TX queue. */
struct idpf_tx_queue {
	const struct rte_memzone *mz;		/* memzone for Tx ring */
	volatile struct idpf_base_tx_desc *tx_ring;	/* Tx ring virtual address */
	volatile union {
		struct idpf_flex_tx_sched_desc *desc_ring;
		struct idpf_splitq_tx_compl_desc *compl_ring;
	};
	uint64_t tx_ring_phys_addr;		/* Tx ring DMA address */
	struct idpf_tx_entry *sw_ring;		/* address array of SW ring */

	uint16_t nb_tx_desc;		/* ring length */
	uint16_t tx_tail;		/* current value of tail */
	volatile uint8_t *qtx_tail;	/* register address of tail */
	/* number of used desc since RS bit set */
	uint16_t nb_used;
	uint16_t nb_free;
	uint16_t last_desc_cleaned;	/* last desc that has been cleaned */
	uint16_t free_thresh;
	uint16_t rs_thresh;

	uint16_t port_id;
	uint16_t queue_id;
	uint64_t offloads;
	uint16_t next_dd;	/* next desc to check DD bit, for VPMD */
	uint16_t next_rs;	/* next desc to set RS bit, for VPMD */

	bool q_set;		/* if tx queue has been configured */
	bool q_started;		/* if tx queue has been started */
	bool tx_deferred_start; /* don't start this queue in dev start */
	const struct idpf_txq_ops *ops;

	/* only valid for split queue mode */
	uint16_t sw_nb_desc;
	uint16_t sw_tail;
	void **txqs;
	uint32_t tx_start_qid;
	uint8_t expected_gen_id;
	struct idpf_tx_queue *complq;
	uint16_t ctype[IDPF_TX_CTYPE_NUM];
};

/* Offload features */
union idpf_tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
		uint64_t l3_len:9; /* L3 (IP) Header Length. */
		uint64_t l4_len:8; /* L4 Header Length. */
		uint64_t tso_segsz:16; /* TCP TSO segment size */
		/* uint64_t unused : 24; */
	};
};

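/*
 * Illustrative sketch only (assumed usage): the union above packs the
 * per-packet header lengths and TSO segment size into one 64-bit word
 * so a Tx context descriptor can be built from a single value.
 *
 *	union idpf_tx_offload tx_offload = { .data = 0 };
 *
 *	tx_offload.l2_len = m->l2_len;
 *	tx_offload.l3_len = m->l3_len;
 *	tx_offload.l4_len = m->l4_len;
 *	tx_offload.tso_segsz = m->tso_segsz;
 */
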
struct idpf_tx_vec_entry {
	struct rte_mbuf *mbuf;
};

union idpf_tx_desc {
	struct idpf_base_tx_desc *tx_ring;
	struct idpf_flex_tx_sched_desc *desc_ring;
	struct idpf_splitq_tx_compl_desc *compl_ring;
};

struct idpf_rxq_ops {
	void (*release_mbufs)(struct idpf_rx_queue *rxq);
};

struct idpf_txq_ops {
	void (*release_mbufs)(struct idpf_tx_queue *txq);
};

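/*
 * Illustrative sketch only (assumed wiring): a driver can point the ops
 * tables at the release helpers declared later in this header.
 *
 *	static const struct idpf_rxq_ops def_rxq_ops = {
 *		.release_mbufs = idpf_qc_rxq_mbufs_release,
 *	};
 *
 *	rxq->ops = &def_rxq_ops;
 */
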
extern int idpf_timestamp_dynfield_offset;
extern uint64_t idpf_timestamp_dynflag;

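/*
 * Illustrative sketch only (assumed usage): once idpf_qc_ts_mbuf_register()
 * has resolved the dynamic timestamp field, an Rx path can attach a raw
 * timestamp value ('ts' below is a placeholder) to an mbuf via the offset
 * and flag declared above.
 *
 *	*RTE_MBUF_DYNFIELD(mb, idpf_timestamp_dynfield_offset,
 *			   rte_mbuf_timestamp_t *) = ts;
 *	mb->ol_flags |= idpf_timestamp_dynflag;
 */
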
__rte_internal
int idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh);
__rte_internal
int idpf_qc_tx_thresh_check(uint16_t nb_desc, uint16_t tx_rs_thresh,
			    uint16_t tx_free_thresh);
__rte_internal
void idpf_qc_rxq_mbufs_release(struct idpf_rx_queue *rxq);
__rte_internal
void idpf_qc_txq_mbufs_release(struct idpf_tx_queue *txq);
__rte_internal
void idpf_qc_split_rx_descq_reset(struct idpf_rx_queue *rxq);
__rte_internal
void idpf_qc_split_rx_bufq_reset(struct idpf_rx_queue *rxq);
__rte_internal
void idpf_qc_split_rx_queue_reset(struct idpf_rx_queue *rxq);
__rte_internal
void idpf_qc_single_rx_queue_reset(struct idpf_rx_queue *rxq);
__rte_internal
void idpf_qc_split_tx_descq_reset(struct idpf_tx_queue *txq);
__rte_internal
void idpf_qc_split_tx_complq_reset(struct idpf_tx_queue *cq);
__rte_internal
void idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq);
__rte_internal
void idpf_qc_rx_queue_release(void *rxq);
__rte_internal
void idpf_qc_tx_queue_release(void *txq);
__rte_internal
int idpf_qc_ts_mbuf_register(struct idpf_rx_queue *rxq);
__rte_internal
int idpf_qc_single_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);
__rte_internal
int idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq);
__rte_internal
uint16_t idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts);
__rte_internal
int idpf_qc_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
__rte_internal
int idpf_qc_splitq_rx_vec_setup(struct idpf_rx_queue *rxq);
__rte_internal
int idpf_qc_tx_vec_avx512_setup(struct idpf_tx_queue *txq);
__rte_internal
uint16_t idpf_dp_singleq_recv_pkts_avx512(void *rx_queue,
					  struct rte_mbuf **rx_pkts,
					  uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_splitq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
					 uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_singleq_xmit_pkts_avx512(void *tx_queue,
					  struct rte_mbuf **tx_pkts,
					  uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_splitq_xmit_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
					 uint16_t nb_pkts);
__rte_internal
uint16_t idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);

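/*
 * Illustrative sketch only (an assumption; the actual selection logic
 * lives in the drivers built on this layer, and 'vport'/'dev' are
 * placeholder handles): a PMD picks one of the receive/transmit pairs
 * above according to the negotiated queue model.
 *
 *	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
 *		dev->rx_pkt_burst = idpf_dp_splitq_recv_pkts;
 *		dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
 *	} else {
 *		dev->rx_pkt_burst = idpf_dp_singleq_recv_pkts;
 *		dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
 *	}
 *	dev->tx_pkt_prepare = idpf_dp_prep_pkts;
 */
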
#endif /* _IDPF_COMMON_RXTX_H_ */