/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_

#include <rte_atomic.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_timer.h>
#include <rte_dev.h>
#include <rte_net.h>

#include "ena_com.h"

#define ENA_REGS_BAR	0
#define ENA_MEM_BAR	2

#define ENA_MAX_NUM_QUEUES	128
#define ENA_MIN_FRAME_LEN	64
#define ENA_NAME_MAX_LEN	20
#define ENA_PKT_MAX_BUFS	17
#define ENA_RX_BUF_MIN_SIZE	1400
#define ENA_DEFAULT_RING_SIZE	1024

#define ENA_MIN_MTU		128

#define ENA_MMIO_DISABLE_REG_READ	BIT(0)

#define ENA_WD_TIMEOUT_SEC	3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
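
/*
 * Hedged sketch (hypothetical helper, not part of the driver): how the
 * keep-alive timeout above might be checked against the timestamp of the
 * last keep-alive event received from the device.
 */
static inline bool
ena_wd_expired_sketch(uint64_t timestamp_wd, uint64_t keep_alive_timeout)
{
	/* Expired when no keep-alive arrived within the timeout window */
	return rte_get_timer_cycles() - timestamp_wd > keep_alive_timeout;
}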

/* While processing submitted and completed descriptors (Rx and Tx path,
 * respectively) in a loop, it is desirable to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence the refill threshold is 1/8 of the queue size, capped at a fixed
 * maximum for very big rings, like 8k Rx rings.
 */
#define ENA_REFILL_THRESH_DIVIDER      8
#define ENA_REFILL_THRESH_PACKET       256
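
/*
 * Hedged sketch of how the two constants above might combine into a
 * per-ring refill burst size (hypothetical helper, not part of the
 * driver API): 1/8 of the ring, capped at 256 descriptors, so a
 * 1024-entry ring refills in bursts of 128 while an 8k ring stays at 256.
 */
static inline unsigned int
ena_refill_thresh_sketch(unsigned int ring_size)
{
	return RTE_MIN(ring_size / ENA_REFILL_THRESH_DIVIDER,
		       (unsigned int)ENA_REFILL_THRESH_PACKET);
}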

#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))
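
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the masked-index macros rely on a power-of-two ring size, with the
 * mask derived as ring_size - 1, so the index wraps without a modulo.
 */
static inline uint16_t
ena_idx_next_sketch(uint16_t idx, unsigned int ring_size)
{
	unsigned int size_mask = ring_size - 1; /* ring_size must be 2^n */

	return ENA_IDX_NEXT_MASKED(idx, size_mask);
}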

#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE		(1 << ENA_RX_RSS_TABLE_LOG_SIZE)

#define ENA_HASH_KEY_SIZE		40

#define ENA_ALL_RSS_HF (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
			ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP)

#define ENA_IO_TXQ_IDX(q)		(2 * (q))
#define ENA_IO_RXQ_IDX(q)		(2 * (q) + 1)
/* Inverse of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)		(((q) - 1) / 2)
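
/*
 * Worked example: the Tx and Rx rings of one ethdev queue share an
 * interleaved I/O queue index space, so ethdev queue 3 uses I/O queue 6
 * for Tx and 7 for Rx, and ENA_IO_RXQ_IDX_REV(7) recovers queue 3.
 */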

extern struct ena_shared_data *ena_shared_data;

struct ena_adapter;

enum ena_ring_type {
	ENA_RING_TYPE_RX = 1,
	ENA_RING_TYPE_TX = 2,
};

struct ena_tx_buffer {
	struct rte_mbuf *mbuf;
	unsigned int tx_descs;
	unsigned int num_of_bufs;
	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};

/* Rx buffer holds only a pointer to the mbuf - may be expanded in the future */
struct ena_rx_buffer {
	struct rte_mbuf *mbuf;
	struct ena_com_buf ena_buf;
};

struct ena_calc_queue_size_ctx {
	struct ena_com_dev_get_features_ctx *get_feat_ctx;
	struct ena_com_dev *ena_dev;
	u32 max_rx_queue_size;
	u32 max_tx_queue_size;
	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;
};

struct ena_stats_tx {
	u64 cnt;
	u64 bytes;
	u64 prepare_ctx_err;
	u64 linearize;
	u64 linearize_failed;
	u64 tx_poll;
	u64 doorbells;
	u64 bad_req_id;
	u64 available_desc;
};

struct ena_stats_rx {
	u64 cnt;
	u64 bytes;
	u64 refill_partial;
	u64 bad_csum;
	u64 mbuf_alloc_fail;
	u64 bad_desc_num;
	u64 bad_req_id;
};

struct ena_ring {
	u16 next_to_use;
	u16 next_to_clean;

	enum ena_ring_type type;
	enum ena_admin_placement_policy_type tx_mem_queue_type;

	/* Indicates there are Tx packets pushed to the device that are
	 * waiting for a doorbell
	 */
	bool pkts_without_db;

	/* Holds the empty requests for Tx/Rx out-of-order completions */
	union {
		uint16_t *empty_tx_reqs;
		uint16_t *empty_rx_reqs;
	};

	union {
		struct ena_tx_buffer *tx_buffer_info; /* context of Tx packets */
		struct ena_rx_buffer *rx_buffer_info; /* context of Rx packets */
	};
	struct rte_mbuf **rx_refill_buffer;
	unsigned int ring_size; /* number of tx/rx_buffer_info entries */
	unsigned int size_mask;

	struct ena_com_io_cq *ena_com_io_cq;
	struct ena_com_io_sq *ena_com_io_sq;

	struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
						__rte_cache_aligned;

	struct rte_mempool *mb_pool;
	unsigned int port_id;
	unsigned int id;
	/* Max header length the PMD can push to the device for LLQ */
	uint8_t tx_max_header_size;
	int configured;

	uint8_t *push_buf_intermediate_buf;

	struct ena_adapter *adapter;
	uint64_t offloads;
	u16 sgl_size;

	bool disable_meta_caching;

	union {
		struct ena_stats_rx rx_stats;
		struct ena_stats_tx tx_stats;
	};

	unsigned int numa_socket_id;
} __rte_cache_aligned;

enum ena_adapter_state {
	ENA_ADAPTER_STATE_FREE    = 0,
	ENA_ADAPTER_STATE_INIT    = 1,
	ENA_ADAPTER_STATE_RUNNING = 2,
	ENA_ADAPTER_STATE_STOPPED = 3,
	ENA_ADAPTER_STATE_CONFIG  = 4,
	ENA_ADAPTER_STATE_CLOSED  = 5,
};

struct ena_driver_stats {
	rte_atomic64_t ierrors;
	rte_atomic64_t oerrors;
	rte_atomic64_t rx_nombuf;
	u64 rx_drops;
};

struct ena_stats_dev {
	u64 wd_expired;
	u64 dev_start;
	u64 dev_stop;
	/*
	 * Tx drops cannot be reported as a regular driver statistic,
	 * because the DPDK rte_eth_stats structure does not provide an
	 * appropriate field for it. As a workaround, it is published as
	 * an extended statistic (see the usage sketch after this struct).
	 */
	u64 tx_drops;
};
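
/*
 * Hedged usage sketch: because tx_drops is exposed only as an xstat, an
 * application reads it through the generic xstats API and matches the
 * counter by name. The buffer handling below is illustrative only:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *	struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 *	// find the entry whose name is "tx_drops" and read vals[i].value
 */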

struct ena_stats_eni {
	/*
	 * The number of packets shaped due to inbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_in_allowance_exceeded;
	/*
	 * The number of packets shaped due to outbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_out_allowance_exceeded;
	/* The number of packets shaped due to PPS allowance being exceeded */
	uint64_t pps_allowance_exceeded;
	/*
	 * The number of packets shaped due to connection tracking
	 * allowance being exceeded and leading to failure in establishment
	 * of new connections
	 */
	uint64_t conntrack_allowance_exceeded;
	/*
	 * The number of packets shaped due to linklocal packet rate
	 * allowance being exceeded
	 */
	uint64_t linklocal_allowance_exceeded;
};

struct ena_offloads {
	bool tso4_supported;
	bool tx_csum_supported;
	bool rx_csum_supported;
	bool rss_hash_supported;
};

/* board specific private data structure */
struct ena_adapter {
	/* OS defined structs */
	struct rte_eth_dev_data *edev_data;

	struct ena_com_dev ena_dev __rte_cache_aligned;

	/* TX */
	struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_tx_ring_size;
	u16 max_tx_sgl_size;

	/* RX */
	struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_rx_ring_size;
	u16 max_rx_sgl_size;

	u32 max_num_io_queues;
	u16 max_mtu;
	struct ena_offloads offloads;

	/* The admin queue has no internal locking of its own and is used to
	 * retrieve statistics from the device. As there is no guarantee that
	 * the application won't try to get statistics from multiple threads,
	 * it is safer to guard the queue with this lock to avoid admin queue
	 * failure (see the locking sketch after this struct).
	 */
	rte_spinlock_t admin_lock;

	int id_number;
	char name[ENA_NAME_MAX_LEN];
	u8 mac_addr[RTE_ETHER_ADDR_LEN];

	void *regs;
	void *dev_mem_base;

	struct ena_driver_stats *drv_stats;
	enum ena_adapter_state state;

	uint64_t tx_supported_offloads;
	uint64_t tx_selected_offloads;
	uint64_t rx_supported_offloads;
	uint64_t rx_selected_offloads;

	bool link_status;

	enum ena_regs_reset_reason_types reset_reason;

	struct rte_timer timer_wd;
	uint64_t timestamp_wd;
	uint64_t keep_alive_timeout;

	struct ena_stats_dev dev_stats;
	struct ena_stats_eni eni_stats;

	bool trigger_reset;

	bool wd_state;

	bool use_large_llq_hdr;
};
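
/*
 * Hedged sketch of the admin-queue locking pattern referenced in
 * struct ena_adapter (hypothetical helper, not part of the driver):
 */
static inline void
ena_stats_query_sketch(struct ena_adapter *adapter)
{
	rte_spinlock_lock(&adapter->admin_lock);
	/* ...issue the admin command that retrieves device statistics... */
	rte_spinlock_unlock(&adapter->admin_lock);
}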

int ena_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int ena_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size);
int ena_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);
int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf);
int ena_rss_configure(struct ena_adapter *adapter);

#endif /* _ENA_ETHDEV_H_ */