/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Intel Corporation
 */

#ifndef _GVE_ETHDEV_H_
#define _GVE_ETHDEV_H_

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_ether.h>
#include <rte_pci.h>

#include "base/gve.h"

/* TODO: workaround to ensure the Tx completion queue is large enough */
#define DQO_TX_MULTIPLIER 4

#define GVE_DEFAULT_RX_FREE_THRESH   64
#define GVE_DEFAULT_TX_FREE_THRESH   32
#define GVE_DEFAULT_TX_RS_THRESH     32
#define GVE_TX_MAX_FREE_SZ          512

#define GVE_RX_BUF_ALIGN_DQO        128
#define GVE_RX_MIN_BUF_SIZE_DQO    1024
#define GVE_RX_MAX_BUF_SIZE_DQO    ((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
#define GVE_MAX_QUEUE_SIZE_DQO     4096

#define GVE_RX_BUF_ALIGN_GQI       2048
#define GVE_RX_MIN_BUF_SIZE_GQI    2048
#define GVE_RX_MAX_BUF_SIZE_GQI    4096
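
/*
 * Illustrative sketch (an assumption about typical usage, not code from this
 * driver): deriving a DQO receive buffer size from the mempool data room and
 * clamping it to the limits above; the GQI limits would be applied the same
 * way. 'pool' stands for the queue's rte_mempool.
 *
 *	uint16_t room = rte_pktmbuf_data_room_size(pool) - RTE_PKTMBUF_HEADROOM;
 *	uint16_t buf_size = RTE_ALIGN_FLOOR(room, GVE_RX_BUF_ALIGN_DQO);
 *
 *	buf_size = RTE_MAX(buf_size, GVE_RX_MIN_BUF_SIZE_DQO);
 *	buf_size = RTE_MIN(buf_size, GVE_RX_MAX_BUF_SIZE_DQO);
 */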

#define GVE_RSS_HASH_KEY_SIZE 40
#define GVE_RSS_INDIR_SIZE 128

#define GVE_TX_CKSUM_OFFLOAD_MASK (		\
		RTE_MBUF_F_TX_L4_MASK  |	\
		RTE_MBUF_F_TX_TCP_SEG)
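
/*
 * Illustrative sketch (an assumption, not this driver's datapath code):
 * deciding whether a Tx mbuf needs checksum/TSO handling based on the mask
 * above. setup_l4_cksum_or_tso() is a hypothetical helper used only for
 * illustration.
 *
 *	if (mbuf->ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK)
 *		setup_l4_cksum_or_tso(txq, mbuf);
 */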

#define GVE_RTE_RSS_OFFLOAD_ALL (	\
	RTE_ETH_RSS_IPV4 |		\
	RTE_ETH_RSS_NONFRAG_IPV4_TCP |	\
	RTE_ETH_RSS_IPV6 |		\
	RTE_ETH_RSS_IPV6_EX |		\
	RTE_ETH_RSS_NONFRAG_IPV6_TCP |	\
	RTE_ETH_RSS_IPV6_TCP_EX |	\
	RTE_ETH_RSS_NONFRAG_IPV4_UDP |	\
	RTE_ETH_RSS_NONFRAG_IPV6_UDP |	\
	RTE_ETH_RSS_IPV6_UDP_EX)
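
/*
 * Illustrative sketch (an assumption about typical application usage, not
 * part of this header's API): restricting the requested RSS hash types to
 * the ones covered by the mask above. 'requested_rss_hf' is a placeholder
 * for the application's desired hash types.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 *	conf.rx_adv_conf.rss_conf.rss_hf =
 *		requested_rss_hf & GVE_RTE_RSS_OFFLOAD_ALL;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */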

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	uint32_t id; /* unique id */
	uint32_t num_entries;
	dma_addr_t *page_buses; /* the dma addrs of the pages */
	const struct rte_memzone *mz;
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tx desc for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

/* Offload features */
union gve_tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
		uint64_t l3_len:9; /* L3 (IP) Header Length. */
		uint64_t l4_len:8; /* L4 Header Length. */
		uint64_t tso_segsz:16; /* TCP TSO segment size */
		/* uint64_t unused : 24; */
	};
};
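
/*
 * Illustrative sketch (an assumption mirroring how other DPDK PMDs use such
 * unions, not code from this driver): packing per-mbuf header lengths into
 * gve_tx_offload so a whole set of offload parameters can be compared with a
 * single 64-bit load. 'last_offload' and update_context_descriptor() are
 * hypothetical names used only for illustration.
 *
 *	union gve_tx_offload off = { .data = 0 };
 *
 *	off.l2_len = mbuf->l2_len;
 *	off.l3_len = mbuf->l3_len;
 *	off.l4_len = mbuf->l4_len;
 *	off.tso_segsz = mbuf->tso_segsz;
 *	if (off.data != last_offload.data)
 *		update_context_descriptor(txq, &off);
 */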

struct gve_tx_iovec {
	uint32_t iov_base; /* offset in fifo */
	uint32_t iov_len;
};

struct gve_tx_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

struct gve_rx_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
	uint64_t no_mbufs;
	uint64_t no_mbufs_bulk;
	uint64_t imissed;
};

struct gve_xstats_name_offset {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
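
/*
 * Illustrative sketch (a hypothetical table, not the driver's actual xstats
 * list): gve_xstats_name_offset pairs an xstats name with a member offset in
 * gve_rx_stats/gve_tx_stats so values can be read generically.
 *
 *	static const struct gve_xstats_name_offset rx_names[] = {
 *		{ "packets",  offsetof(struct gve_rx_stats, packets) },
 *		{ "no_mbufs", offsetof(struct gve_rx_stats, no_mbufs) },
 *	};
 *
 *	uint64_t val = *(const uint64_t *)((const char *)&rxq->stats +
 *					   rx_names[i].offset);
 */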

struct gve_tx_queue {
	volatile union gve_tx_desc *tx_desc_ring;
	const struct rte_memzone *mz;
	uint64_t tx_ring_phys_addr;
	struct rte_mbuf **sw_ring;
	volatile rte_be32_t *qtx_tail;
	volatile rte_be32_t *qtx_head;

	uint32_t tx_tail;
	uint16_t nb_tx_desc;
	uint16_t nb_free;
	uint16_t nb_used;
	uint32_t next_to_clean;
	uint16_t free_thresh;
	uint16_t rs_thresh;

	/* Only valid for DQO_QPL queue format */
	uint16_t sw_tail;
	uint16_t sw_ntc;
	uint16_t sw_nb_free;
	uint32_t fifo_size;
	uint32_t fifo_head;
	uint32_t fifo_avail;
	uint64_t fifo_base;
	struct gve_queue_page_list *qpl;
	struct gve_tx_iovec *iov_ring;

	/* stats items */
	struct gve_tx_stats stats;

	uint16_t port_id;
	uint16_t queue_id;

	uint16_t ntfy_id;
	volatile rte_be32_t *ntfy_addr;

	struct gve_priv *hw;
	const struct rte_memzone *qres_mz;
	struct gve_queue_resources *qres;

	/* newly added for DQO */
	volatile union gve_tx_desc_dqo *tx_ring;
	struct gve_tx_compl_desc *compl_ring;
	const struct rte_memzone *compl_ring_mz;
	uint64_t compl_ring_phys_addr;
	uint32_t complq_tail;
	uint16_t sw_size;
	uint8_t cur_gen_bit;
	uint32_t last_desc_cleaned;
	void **txqs;
	uint16_t re_cnt;

	/* Only valid for DQO_RDA queue format */
	struct gve_tx_queue *complq;

	uint8_t is_gqi_qpl;
};

struct gve_rx_ctx {
	struct rte_mbuf *mbuf_head;
	struct rte_mbuf *mbuf_tail;
	uint16_t total_frags;
	bool drop_pkt;
};

struct gve_rx_queue {
	volatile struct gve_rx_desc *rx_desc_ring;
	volatile union gve_rx_data_slot *rx_data_ring;
	const struct rte_memzone *mz;
	const struct rte_memzone *data_mz;
	uint64_t rx_ring_phys_addr;
	struct rte_mbuf **sw_ring;
	struct rte_mempool *mpool;
	struct gve_rx_ctx ctx;

	uint16_t rx_tail;
	uint16_t nb_rx_desc;
	uint16_t expected_seqno; /* the next expected seqno */
	uint16_t free_thresh;
	uint16_t nb_rx_hold;
	uint32_t next_avail;
	uint32_t nb_avail;

	volatile rte_be32_t *qrx_tail;
	volatile rte_be32_t *ntfy_addr;

	/* only valid for GQI_QPL queue format */
	struct gve_queue_page_list *qpl;

	/* stats items */
	struct gve_rx_stats stats;

	struct gve_priv *hw;
	const struct rte_memzone *qres_mz;
	struct gve_queue_resources *qres;

	uint16_t port_id;
	uint16_t queue_id;
	uint16_t ntfy_id;
	uint16_t rx_buf_len;

	/* newly added for DQO */
	volatile struct gve_rx_desc_dqo *rx_ring;
	struct gve_rx_compl_desc_dqo *compl_ring;
	const struct rte_memzone *compl_ring_mz;
	uint64_t compl_ring_phys_addr;
	uint8_t cur_gen_bit;
	uint16_t bufq_tail;

	/* Only valid for DQO_RDA queue format */
	struct gve_rx_queue *bufq;

	uint8_t is_gqi_qpl;
};

struct gve_priv {
	struct gve_irq_db *irq_dbs; /* array of num_ntfy_blks */
	const struct rte_memzone *irq_dbs_mz;
	uint32_t mgmt_msix_idx;
	rte_be32_t *cnt_array; /* array of num_event_counters */
	const struct rte_memzone *cnt_array_mz;

	uint16_t num_event_counters;
	uint16_t tx_desc_cnt; /* txq size */
	uint16_t rx_desc_cnt; /* rxq size */
	uint16_t tx_pages_per_qpl; /* number of pages per tx qpl */
	uint16_t rx_data_slot_cnt; /* number of rx data slots */

	/* Only valid for DQO_RDA queue format */
	uint16_t tx_compq_size; /* tx completion queue size */
	uint16_t rx_bufq_size; /* rx buffer queue size */

	uint64_t max_registered_pages;
	uint64_t num_registered_pages; /* num pages registered with NIC */
	uint16_t default_num_queues; /* default num queues to set up */
	enum gve_queue_format queue_format; /* see enum gve_queue_format */
	uint8_t enable_rsc;

	uint16_t max_nb_txq;
	uint16_t max_nb_rxq;
	uint32_t num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	rte_be32_t __iomem *db_bar2; /* "array" of doorbells */
	struct rte_pci_device *pci_dev;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	struct gve_dma_mem adminq_dma_mem;
	uint32_t adminq_mask; /* masks prod_cnt to adminq size */
	uint32_t adminq_prod_cnt; /* free-running count of AQ cmds executed */
	uint32_t adminq_cmd_fail; /* free-running count of AQ cmds failed */
	uint32_t adminq_timeouts; /* free-running count of AQ cmds that timed out */
	/* free-running counts of each AQ cmd executed */
	uint32_t adminq_describe_device_cnt;
	uint32_t adminq_cfg_device_resources_cnt;
	uint32_t adminq_register_page_list_cnt;
	uint32_t adminq_unregister_page_list_cnt;
	uint32_t adminq_create_tx_queue_cnt;
	uint32_t adminq_create_rx_queue_cnt;
	uint32_t adminq_destroy_tx_queue_cnt;
	uint32_t adminq_destroy_rx_queue_cnt;
	uint32_t adminq_dcfg_device_resources_cnt;
	uint32_t adminq_cfg_rss_cnt;
	uint32_t adminq_set_driver_parameter_cnt;
	uint32_t adminq_report_stats_cnt;
	uint32_t adminq_report_link_speed_cnt;
	uint32_t adminq_get_ptype_map_cnt;
	uint32_t adminq_verify_driver_compatibility_cnt;
	volatile uint32_t state_flags;

	/* Gvnic device link speed from hypervisor. */
	uint64_t link_speed;

	uint16_t max_mtu;
	struct rte_ether_addr dev_addr; /* mac address */

	struct gve_queue_page_list *qpl;

	struct gve_tx_queue **txqs;
	struct gve_rx_queue **rxqs;

	uint32_t stats_report_len;
	const struct rte_memzone *stats_report_mem;
	uint16_t stats_start_idx; /* start index of array of stats written by NIC */
	uint16_t stats_end_idx; /* end index of array of stats written by NIC */

	struct gve_rss_config rss_config;
};

static inline bool
gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}
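
/*
 * Illustrative sketch (an assumption, not the driver's actual dispatch
 * logic): gve_is_gqi() is the natural switch between the GQI and DQO
 * datapaths, e.g. when selecting burst functions.
 *
 *	if (gve_is_gqi(priv)) {
 *		dev->rx_pkt_burst = gve_rx_burst;
 *		dev->tx_pkt_burst = gve_tx_burst;
 *	} else {
 *		dev->rx_pkt_burst = gve_rx_burst_dqo;
 *		dev->tx_pkt_burst = gve_tx_burst_dqo;
 *	}
 */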

static inline bool
gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return !!rte_bit_relaxed_get32(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK,
				       &priv->state_flags);
}

static inline void
gve_set_admin_queue_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_set32(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK,
			      &priv->state_flags);
}

static inline void
gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_clear32(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK,
				&priv->state_flags);
}

static inline bool
gve_get_device_resources_ok(struct gve_priv *priv)
{
	return !!rte_bit_relaxed_get32(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK,
				       &priv->state_flags);
}

static inline void
gve_set_device_resources_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_set32(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK,
			      &priv->state_flags);
}

static inline void
gve_clear_device_resources_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_clear32(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK,
				&priv->state_flags);
}

static inline bool
gve_get_device_rings_ok(struct gve_priv *priv)
{
	return !!rte_bit_relaxed_get32(GVE_PRIV_FLAGS_DEVICE_RINGS_OK,
				       &priv->state_flags);
}

static inline void
gve_set_device_rings_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_set32(GVE_PRIV_FLAGS_DEVICE_RINGS_OK,
			      &priv->state_flags);
}

static inline void
gve_clear_device_rings_ok(struct gve_priv *priv)
{
	rte_bit_relaxed_clear32(GVE_PRIV_FLAGS_DEVICE_RINGS_OK,
				&priv->state_flags);
}
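
/*
 * Illustrative sketch (an assumption about typical call order, not code from
 * this driver): the state-flag helpers above gate admin-queue use between
 * setup and teardown.
 *
 *	gve_set_admin_queue_ok(priv);
 *	...
 *	if (!gve_get_admin_queue_ok(priv))
 *		return -ENXIO;
 *	...
 *	gve_clear_admin_queue_ok(priv);
 */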

int
gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
		   unsigned int socket_id, const struct rte_eth_rxconf *conf,
		   struct rte_mempool *pool);
int
gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
		   unsigned int socket_id, const struct rte_eth_txconf *conf);

void
gve_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void
gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int
gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int
gve_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int
gve_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int
gve_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

void
gve_stop_tx_queues(struct rte_eth_dev *dev);

void
gve_stop_rx_queues(struct rte_eth_dev *dev);

uint16_t
gve_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t
gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

void
gve_set_rx_function(struct rte_eth_dev *dev);

void
gve_set_tx_function(struct rte_eth_dev *dev);

/* Below functions are used for DQO */

int
gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *conf,
		       struct rte_mempool *pool);
int
gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *conf);

void
gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);

void
gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);

int
gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int
gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int
gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int
gve_tx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void
gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);

void
gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);

uint16_t
gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t
gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

void
gve_set_rx_function_dqo(struct rte_eth_dev *dev);

void
gve_set_tx_function_dqo(struct rte_eth_dev *dev);

#endif /* _GVE_ETHDEV_H_ */