/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Intel Corporation
 */

#ifndef _GVE_ETHDEV_H_
#define _GVE_ETHDEV_H_

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_ether.h>
#include <rte_pci.h>

#include "base/gve.h"

/* TODO: workaround to ensure the Tx completion queue is large enough */
#define DQO_TX_MULTIPLIER 4

#define GVE_DEFAULT_MAX_RING_SIZE	1024
#define GVE_DEFAULT_MIN_RX_RING_SIZE	512
#define GVE_DEFAULT_MIN_TX_RING_SIZE	256

#define GVE_DEFAULT_RX_FREE_THRESH	64
#define GVE_DEFAULT_TX_FREE_THRESH	32
#define GVE_DEFAULT_TX_RS_THRESH	32
#define GVE_TX_MAX_FREE_SZ		512

#define GVE_RX_BUF_ALIGN_DQO		128
#define GVE_RX_MIN_BUF_SIZE_DQO		1024
#define GVE_RX_MAX_BUF_SIZE_DQO		((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
#define GVE_MAX_QUEUE_SIZE_DQO		4096

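/*
 * Note: GVE_RX_MAX_BUF_SIZE_DQO evaluates to 16256 bytes (16 KiB minus the
 * 128-byte DQO buffer alignment).  An illustrative sketch (not prescriptive)
 * of how an Rx setup path might derive a DQO buffer size from a mempool,
 * clamped and aligned to the limits above ("mp" is a hypothetical mempool):
 *
 *	uint16_t buf_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 *	buf_size = RTE_MIN(buf_size, GVE_RX_MAX_BUF_SIZE_DQO);
 *	buf_size = RTE_ALIGN_FLOOR(buf_size, GVE_RX_BUF_ALIGN_DQO);
 */
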
#define GVE_RX_BUF_ALIGN_GQI		2048
#define GVE_RX_MIN_BUF_SIZE_GQI		2048
#define GVE_RX_MAX_BUF_SIZE_GQI		4096

#define GVE_RSS_HASH_KEY_SIZE 40
#define GVE_RSS_INDIR_SIZE 128

#define GVE_TX_CKSUM_OFFLOAD_MASK (		\
		RTE_MBUF_F_TX_L4_MASK  |	\
		RTE_MBUF_F_TX_TCP_SEG)

#define GVE_TX_CKSUM_OFFLOAD_MASK_DQO (		\
		GVE_TX_CKSUM_OFFLOAD_MASK |	\
		RTE_MBUF_F_TX_IP_CKSUM)

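/*
 * Illustrative use only (not a prescribed API): a Tx path would typically
 * test the mbuf offload flags against these masks to decide whether a
 * checksum has been requested, e.g.:
 *
 *	bool csum = !!(mbuf->ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO);
 */
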
#define GVE_RTE_RSS_OFFLOAD_ALL (	\
	RTE_ETH_RSS_IPV4 |		\
	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
	RTE_ETH_RSS_IPV6 |		\
	RTE_ETH_RSS_IPV6_EX |		\
	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
	RTE_ETH_RSS_IPV6_TCP_EX |	\
	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
	RTE_ETH_RSS_IPV6_UDP_EX)

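/*
 * Illustrative sketch only ("port_id" and "hash_key" are supplied by the
 * application): the full set of hash types listed here can be requested
 * through the standard ethdev RSS API, e.g.:
 *
 *	uint8_t hash_key[GVE_RSS_HASH_KEY_SIZE];
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = hash_key,
 *		.rss_key_len = GVE_RSS_HASH_KEY_SIZE,
 *		.rss_hf = GVE_RTE_RSS_OFFLOAD_ALL,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */
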
/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	uint32_t id; /* unique id */
	uint32_t num_entries;
	dma_addr_t *page_buses; /* the dma addrs of the pages */
	const struct rte_memzone *mz;
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tx desc for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

/* Offload features */
union gve_tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
		uint64_t l3_len:9; /* L3 (IP) Header Length. */
		uint64_t l4_len:8; /* L4 Header Length. */
		uint64_t tso_segsz:16; /* TCP TSO segment size */
		/* uint64_t unused : 24; */
	};
};

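/*
 * Illustrative use only: a Tx path can cache the per-packet header lengths
 * and TSO segment size from an rte_mbuf in this single 64-bit word, e.g.
 * ("mbuf" is a hypothetical packet):
 *
 *	union gve_tx_offload tx_offload = { .data = 0 };
 *	tx_offload.l2_len = mbuf->l2_len;
 *	tx_offload.l3_len = mbuf->l3_len;
 *	tx_offload.l4_len = mbuf->l4_len;
 *	tx_offload.tso_segsz = mbuf->tso_segsz;
 */
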
struct gve_tx_iovec {
	uint32_t iov_base; /* offset in fifo */
	uint32_t iov_len;
};

struct gve_tx_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

struct gve_rx_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
	uint64_t no_mbufs;
	uint64_t no_mbufs_bulk;
	uint64_t imissed;
};

struct gve_xstats_name_offset {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

struct gve_tx_queue {
	volatile union gve_tx_desc *tx_desc_ring;
	const struct rte_memzone *mz;
	uint64_t tx_ring_phys_addr;
	struct rte_mbuf **sw_ring;
	volatile rte_be32_t *qtx_tail;
	volatile rte_be32_t *qtx_head;

	uint32_t tx_tail;
	uint16_t nb_tx_desc;
	uint16_t nb_free;
	uint16_t nb_used;
	uint32_t next_to_clean;
	uint16_t free_thresh;
	uint16_t rs_thresh;

	/* Only valid for DQO_QPL queue format */
	uint16_t sw_tail;
	uint16_t sw_ntc;
	uint16_t sw_nb_free;
	uint32_t fifo_size;
	uint32_t fifo_head;
	uint32_t fifo_avail;
	uint64_t fifo_base;
	struct gve_queue_page_list *qpl;
	struct gve_tx_iovec *iov_ring;

	/* stats items */
	struct gve_tx_stats stats;

	uint16_t port_id;
	uint16_t queue_id;

	uint16_t ntfy_id;
	volatile rte_be32_t *ntfy_addr;

	struct gve_priv *hw;
	const struct rte_memzone *qres_mz;
	struct gve_queue_resources *qres;

	/* newly added for DQO */
	volatile union gve_tx_desc_dqo *tx_ring;
	struct gve_tx_compl_desc *compl_ring;
	const struct rte_memzone *compl_ring_mz;
	uint64_t compl_ring_phys_addr;
	uint32_t complq_tail;
	uint16_t sw_size;
	uint8_t cur_gen_bit;
	uint32_t last_desc_cleaned;
	void **txqs;
	uint16_t re_cnt;

	/* Only valid for DQO_RDA queue format */
	struct gve_tx_queue *complq;

	uint8_t is_gqi_qpl;
};

struct gve_rx_ctx {
	struct rte_mbuf *mbuf_head;
	struct rte_mbuf *mbuf_tail;
	uint16_t total_frags;
	bool drop_pkt;
};

struct gve_rx_queue {
	volatile struct gve_rx_desc *rx_desc_ring;
	volatile union gve_rx_data_slot *rx_data_ring;
	const struct rte_memzone *mz;
	const struct rte_memzone *data_mz;
	uint64_t rx_ring_phys_addr;
	struct rte_mbuf **sw_ring;
	struct rte_mempool *mpool;
	struct gve_rx_ctx ctx;

	uint16_t rx_tail;
	uint16_t nb_rx_desc;
	uint16_t expected_seqno; /* the next expected seqno */
	uint16_t free_thresh;
	uint16_t nb_rx_hold;
	uint32_t next_avail;
	uint32_t nb_avail;

	volatile rte_be32_t *qrx_tail;
	volatile rte_be32_t *ntfy_addr;

	/* only valid for GQI_QPL queue format */
	struct gve_queue_page_list *qpl;

	/* stats items */
	struct gve_rx_stats stats;

	struct gve_priv *hw;
	const struct rte_memzone *qres_mz;
	struct gve_queue_resources *qres;

	uint16_t port_id;
	uint16_t queue_id;
	uint16_t ntfy_id;
	uint16_t rx_buf_len;

	/* newly added for DQO */
	volatile struct gve_rx_desc_dqo *rx_ring;
	struct gve_rx_compl_desc_dqo *compl_ring;
	const struct rte_memzone *compl_ring_mz;
	uint64_t compl_ring_phys_addr;
	uint8_t cur_gen_bit;
	uint16_t bufq_tail;

	/* Only valid for DQO_RDA queue format */
	struct gve_rx_queue *bufq;

	uint8_t is_gqi_qpl;
};

struct gve_priv {
	struct gve_irq_db *irq_dbs; /* array of num_ntfy_blks */
	const struct rte_memzone *irq_dbs_mz;
	uint32_t mgmt_msix_idx;
	rte_be32_t *cnt_array; /* array of num_event_counters */
	const struct rte_memzone *cnt_array_mz;

	uint16_t num_event_counters;

	/* TX ring size default and limits. */
	uint16_t default_tx_desc_cnt;
	uint16_t max_tx_desc_cnt;
	uint16_t min_tx_desc_cnt;

	/* RX ring size default and limits. */
	uint16_t default_rx_desc_cnt;
	uint16_t max_rx_desc_cnt;
	uint16_t min_rx_desc_cnt;

	uint16_t tx_pages_per_qpl;

	/* Only valid for DQO_RDA queue format */
	uint16_t tx_compq_size; /* tx completion queue size */
	uint16_t rx_bufq_size; /* rx buffer queue size */

	uint64_t max_registered_pages;
	uint64_t num_registered_pages; /* num pages registered with NIC */
	uint16_t default_num_queues; /* default num queues to set up */
	enum gve_queue_format queue_format; /* see enum gve_queue_format */
	uint8_t enable_rsc;

	uint16_t max_nb_txq;
	uint16_t max_nb_rxq;
	uint32_t num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	rte_be32_t __iomem *db_bar2; /* "array" of doorbells */
	struct rte_pci_device *pci_dev;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	struct gve_dma_mem adminq_dma_mem;
	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
	uint32_t adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running counts of each AQ cmd executed */
	uint32_t adminq_describe_device_cnt;
	uint32_t adminq_cfg_device_resources_cnt;
	uint32_t adminq_register_page_list_cnt;
	uint32_t adminq_unregister_page_list_cnt;
	uint32_t adminq_create_tx_queue_cnt;
	uint32_t adminq_create_rx_queue_cnt;
	uint32_t adminq_destroy_tx_queue_cnt;
	uint32_t adminq_destroy_rx_queue_cnt;
	uint32_t adminq_dcfg_device_resources_cnt;
	uint32_t adminq_cfg_rss_cnt;
	uint32_t adminq_set_driver_parameter_cnt;
	uint32_t adminq_report_stats_cnt;
	uint32_t adminq_report_link_speed_cnt;
	uint32_t adminq_get_ptype_map_cnt;
	uint32_t adminq_verify_driver_compatibility_cnt;
	volatile uint32_t state_flags;

	/* gVNIC device link speed reported by the hypervisor. */
	uint64_t link_speed;

	uint16_t max_mtu;
	struct rte_ether_addr dev_addr; /* MAC address */

	struct gve_tx_queue **txqs;
	struct gve_rx_queue **rxqs;

	uint32_t stats_report_len;
	const struct rte_memzone *stats_report_mem;
	uint16_t stats_start_idx; /* start index of array of stats written by NIC */
	uint16_t stats_end_idx; /* end index of array of stats written by NIC */

	struct gve_rss_config rss_config;
	struct gve_ptype_lut *ptype_lut_dqo;
};

static inline bool
gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

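/*
 * Illustrative sketch only ("priv" and "dev" are whatever the caller holds):
 * datapath selection typically branches on the negotiated queue format, e.g.:
 *
 *	if (gve_is_gqi(priv))
 *		gve_set_rx_function(dev);
 *	else
 *		gve_set_rx_function_dqo(dev);
 */
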
static inline bool
gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return !!rte_bit_relaxed_get32(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK,
				       &priv->state_flags);
}

static inline void
gve_set_admin_queue_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_set32(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK,
			      &priv->state_flags);
}

static inline void
gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_clear32(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK,
				&priv->state_flags);
}

static inline bool
gve_get_device_resources_ok(struct gve_priv *priv)
{
	return !!rte_bit_relaxed_get32(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK,
				       &priv->state_flags);
}

static inline void
gve_set_device_resources_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_set32(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK,
			      &priv->state_flags);
}

static inline void
gve_clear_device_resources_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_clear32(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK,
				&priv->state_flags);
}

static inline bool
gve_get_device_rings_ok(struct gve_priv *priv)
{
	return !!rte_bit_relaxed_get32(GVE_PRIV_FLAGS_DEVICE_RINGS_OK,
				       &priv->state_flags);
}

static inline void
gve_set_device_rings_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_set32(GVE_PRIV_FLAGS_DEVICE_RINGS_OK,
			      &priv->state_flags);
}

static inline void
gve_clear_device_rings_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_clear32(GVE_PRIV_FLAGS_DEVICE_RINGS_OK,
				&priv->state_flags);
}

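/*
 * Illustrative sketch only: setup code marks each stage ready once it has
 * been configured, later code checks the flag before relying on that stage,
 * and teardown clears it, e.g.:
 *
 *	gve_set_admin_queue_ok(priv);
 *	...
 *	if (!gve_get_admin_queue_ok(priv))
 *		return -ENODEV;
 *	...
 *	gve_clear_admin_queue_ok(priv);
 */
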
int
gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
		   unsigned int socket_id, const struct rte_eth_rxconf *conf,
		   struct rte_mempool *pool);
int
gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
		   unsigned int socket_id, const struct rte_eth_txconf *conf);

void
gve_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void
gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int
gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int
gve_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int
gve_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int
gve_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

void
gve_stop_tx_queues(struct rte_eth_dev *dev);

void
gve_stop_rx_queues(struct rte_eth_dev *dev);

uint16_t
gve_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t
gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

void
gve_set_rx_function(struct rte_eth_dev *dev);

void
gve_set_tx_function(struct rte_eth_dev *dev);

struct gve_queue_page_list *
gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
	uint32_t num_pages);
int
gve_teardown_queue_page_list(struct gve_priv *priv,
	struct gve_queue_page_list *qpl);

/* The functions below are used for the DQO queue format */

int
gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *conf,
		       struct rte_mempool *pool);
int
gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *conf);

void
gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);

void
gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);

int
gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int
gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int
gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int
gve_tx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void
gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);

void
gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);

uint16_t
gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t
gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

void
gve_set_rx_function_dqo(struct rte_eth_dev *dev);

void
gve_set_tx_function_dqo(struct rte_eth_dev *dev);

#endif /* _GVE_ETHDEV_H_ */