/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_

#include <rte_cycles.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_timer.h>

#include "ena_com.h"

#define ENA_REGS_BAR	0
#define ENA_MEM_BAR	2

#define ENA_MAX_NUM_QUEUES	128
#define ENA_MIN_FRAME_LEN	64
#define ENA_NAME_MAX_LEN	20
#define ENA_PKT_MAX_BUFS	17
#define ENA_RX_BUF_MIN_SIZE	1400
#define ENA_DEFAULT_RING_SIZE	1024

#define ENA_MIN_MTU		128

#define ENA_MMIO_DISABLE_REG_READ	BIT(0)

#define ENA_WD_TIMEOUT_SEC	3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
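
/*
 * Usage sketch (not part of this header): the watchdog compares the cycles
 * elapsed since the last keep-alive event against this timeout, using the
 * timestamp_wd and keep_alive_timeout fields of struct ena_adapter defined
 * below, e.g.:
 *
 *   if (rte_get_timer_cycles() - adapter->timestamp_wd >
 *       adapter->keep_alive_timeout)
 *           ... request a device reset ...
 */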

/* While processing submitted and completed descriptors (Rx and Tx path
 * respectively) in a loop, it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence the threshold is 1/8 of the queue size, capped at a maximum value
 * for very big rings (e.g. 8k Rx rings); see the sketch below.
 */
#define ENA_REFILL_THRESH_DIVIDER      8
#define ENA_REFILL_THRESH_PACKET       256
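
/*
 * Illustrative helper (hypothetical name, a minimal sketch of how the two
 * values above are meant to be combined): take 1/8 of the ring size, but
 * never wait for more than ENA_REFILL_THRESH_PACKET descriptors.
 */
static inline unsigned int
ena_refill_thresh_sketch(unsigned int ring_size)
{
	unsigned int thresh = ring_size / ENA_REFILL_THRESH_DIVIDER;

	return thresh < ENA_REFILL_THRESH_PACKET ?
		thresh : ENA_REFILL_THRESH_PACKET;
}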

#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))
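
/*
 * Worked example (sketch): these macros assume a power-of-two ring size
 * with mask == ring_size - 1, so the index wraps without a modulo. For the
 * default 1024-entry ring, ENA_IDX_NEXT_MASKED(1023, 1023) == (1024 & 1023)
 * == 0, i.e. the index wraps back to the start of the ring.
 */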

struct ena_adapter;

enum ena_ring_type {
	ENA_RING_TYPE_RX = 1,
	ENA_RING_TYPE_TX = 2,
};

struct ena_tx_buffer {
	struct rte_mbuf *mbuf;
	unsigned int tx_descs;
	unsigned int num_of_bufs;
	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};

/* Rx buffer holds only a pointer to the mbuf; may be expanded in the future */
struct ena_rx_buffer {
	struct rte_mbuf *mbuf;
	struct ena_com_buf ena_buf;
};

struct ena_calc_queue_size_ctx {
	struct ena_com_dev_get_features_ctx *get_feat_ctx;
	struct ena_com_dev *ena_dev;
	u32 max_rx_queue_size;
	u32 max_tx_queue_size;
	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;
};

struct ena_stats_tx {
	u64 cnt;
	u64 bytes;
	u64 prepare_ctx_err;
	u64 linearize;
	u64 linearize_failed;
	u64 tx_poll;
	u64 doorbells;
	u64 bad_req_id;
	u64 available_desc;
};

struct ena_stats_rx {
	u64 cnt;
	u64 bytes;
	u64 refill_partial;
	u64 bad_csum;
	u64 mbuf_alloc_fail;
	u64 bad_desc_num;
	u64 bad_req_id;
};

struct ena_ring {
	u16 next_to_use;
	u16 next_to_clean;

	enum ena_ring_type type;
	enum ena_admin_placement_policy_type tx_mem_queue_type;

	/* Indicates that Tx packets were pushed to the device and are
	 * awaiting the doorbell; see the sketch after this struct.
	 */
	bool pkts_without_db;

	/* Holds the empty requests for Tx/Rx out-of-order (OOO) completions */
	union {
		uint16_t *empty_tx_reqs;
		uint16_t *empty_rx_reqs;
	};

	union {
		struct ena_tx_buffer *tx_buffer_info; /* context of Tx packet */
		struct ena_rx_buffer *rx_buffer_info; /* context of Rx packet */
	};
	struct rte_mbuf **rx_refill_buffer;
	unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
	unsigned int size_mask;

	struct ena_com_io_cq *ena_com_io_cq;
	struct ena_com_io_sq *ena_com_io_sq;

	struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
						__rte_cache_aligned;

	struct rte_mempool *mb_pool;
	unsigned int port_id;
	unsigned int id;
	/* Max length PMD can push to device for LLQ */
	uint8_t tx_max_header_size;
	int configured;

	uint8_t *push_buf_intermediate_buf;

	struct ena_adapter *adapter;
	uint64_t offloads;
	u16 sgl_size;

	bool disable_meta_caching;

	union {
		struct ena_stats_rx rx_stats;
		struct ena_stats_tx tx_stats;
	};

	unsigned int numa_socket_id;
} __rte_cache_aligned;
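
/*
 * Doorbell batching sketch (hypothetical, simplified; not part of this
 * header): Tx packets are queued with pkts_without_db set, and the doorbell
 * is written once per burst rather than once per packet, e.g.:
 *
 *   if (ring->pkts_without_db) {
 *           ena_com_write_sq_doorbell(ring->ena_com_io_sq);
 *           ring->pkts_without_db = false;
 *   }
 */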

enum ena_adapter_state {
	ENA_ADAPTER_STATE_FREE    = 0,
	ENA_ADAPTER_STATE_INIT    = 1,
	ENA_ADAPTER_STATE_RUNNING = 2,
	ENA_ADAPTER_STATE_STOPPED = 3,
	ENA_ADAPTER_STATE_CONFIG  = 4,
	ENA_ADAPTER_STATE_CLOSED  = 5,
};

struct ena_driver_stats {
	rte_atomic64_t ierrors;
	rte_atomic64_t oerrors;
	rte_atomic64_t rx_nombuf;
	u64 rx_drops;
};

struct ena_stats_dev {
	u64 wd_expired;
	u64 dev_start;
	u64 dev_stop;
	/*
	 * Tx drops cannot be reported as a regular driver statistic because
	 * the DPDK rte_eth_stats structure does not provide a suitable field
	 * for them. As a workaround, they are published as an extended
	 * statistic (xstat).
	 */
	u64 tx_drops;
};
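
/*
 * Application-side sketch (hypothetical usage, not part of this header):
 * tx_drops surfaces through the xstats API rather than rte_eth_stats_get(),
 * e.g.:
 *
 *   struct rte_eth_xstat_name names[NUM];
 *   struct rte_eth_xstat xstats[NUM];
 *
 *   rte_eth_xstats_get_names(port_id, names, NUM);
 *   rte_eth_xstats_get(port_id, xstats, NUM);
 *   ... look up the entry whose name is "tx_drops" ...
 */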

struct ena_stats_eni {
	/*
	 * The number of packets shaped due to inbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_in_allowance_exceeded;
	/*
	 * The number of packets shaped due to outbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_out_allowance_exceeded;
	/* The number of packets shaped due to PPS allowance being exceeded */
	uint64_t pps_allowance_exceeded;
	/*
	 * The number of packets shaped due to connection tracking
	 * allowance being exceeded and leading to failure in establishment
	 * of new connections
	 */
	uint64_t conntrack_allowance_exceeded;
	/*
	 * The number of packets shaped due to linklocal packet rate
	 * allowance being exceeded
	 */
	uint64_t linklocal_allowance_exceeded;
};

struct ena_offloads {
	bool tso4_supported;
	bool tx_csum_supported;
	bool rx_csum_supported;
};

/* Board-specific private data structure */
struct ena_adapter {
	/* OS defined structs */
	struct rte_pci_device *pdev;
	struct rte_eth_dev_data *rte_eth_dev_data;
	struct rte_eth_dev *rte_dev;

	struct ena_com_dev ena_dev __rte_cache_aligned;

	/* TX */
	struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_tx_ring_size;
	u16 max_tx_sgl_size;

	/* RX */
	struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_rx_ring_size;
	u16 max_rx_sgl_size;

	u32 max_num_io_queues;
	u16 max_mtu;
	struct ena_offloads offloads;

	/* The admin queue isn't protected by a lock of its own and is used
	 * to retrieve statistics from the device. As there is no guarantee
	 * that the application won't query statistics from multiple threads,
	 * it is safer to serialize access to the queue to avoid admin queue
	 * failures; see the locking sketch after this struct.
	 */
	rte_spinlock_t admin_lock;

	int id_number;
	char name[ENA_NAME_MAX_LEN];
	u8 mac_addr[RTE_ETHER_ADDR_LEN];

	void *regs;
	void *dev_mem_base;

	struct ena_driver_stats *drv_stats;
	enum ena_adapter_state state;

	uint64_t tx_supported_offloads;
	uint64_t tx_selected_offloads;
	uint64_t rx_supported_offloads;
	uint64_t rx_selected_offloads;

	bool link_status;

	enum ena_regs_reset_reason_types reset_reason;

	struct rte_timer timer_wd;
	uint64_t timestamp_wd;
	uint64_t keep_alive_timeout;

	struct ena_stats_dev dev_stats;
	struct ena_stats_eni eni_stats;

	bool trigger_reset;

	bool wd_state;

	bool use_large_llq_hdr;
};
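
/*
 * Locking sketch (hypothetical, a minimal illustration): an admin-queue
 * command issued on behalf of the application, e.g. a basic statistics
 * query, is wrapped with admin_lock:
 *
 *   rte_spinlock_lock(&adapter->admin_lock);
 *   rc = ena_com_get_dev_basic_stats(&adapter->ena_dev, &stats);
 *   rte_spinlock_unlock(&adapter->admin_lock);
 */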

#endif /* _ENA_ETHDEV_H_ */