xref: /dpdk/drivers/net/ena/ena_ethdev.c (revision e4f0e2158b8e210065e91f45fd83aee118cbbd96)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
3  * All rights reserved.
4  */
5 
6 #include <rte_string_fns.h>
7 #include <rte_errno.h>
8 #include <rte_version.h>
9 #include <rte_net.h>
10 #include <rte_kvargs.h>
11 
12 #include "ena_ethdev.h"
13 #include "ena_logs.h"
14 #include "ena_platform.h"
15 #include "ena_com.h"
16 #include "ena_eth_com.h"
17 
18 #include <ena_common_defs.h>
19 #include <ena_regs_defs.h>
20 #include <ena_admin_defs.h>
21 #include <ena_eth_io_defs.h>
22 
23 #define DRV_MODULE_VER_MAJOR	2
24 #define DRV_MODULE_VER_MINOR	7
25 #define DRV_MODULE_VER_SUBMINOR	0
26 
27 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
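
/*
 * Illustration: __MERGE_64B_H_L(0x1, 0x2) yields 0x0000000100000002, i.e.
 * `h` becomes the upper and `l` the lower 32 bits of the result. This is how
 * the 32-bit statistic halves reported by the device (e.g. rx_pkts_high and
 * rx_pkts_low in ena_stats_get()) are combined into one 64-bit counter.
 */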
28 
29 #define GET_L4_HDR_LEN(mbuf)					\
30 	((rte_pktmbuf_mtod_offset(mbuf,	struct rte_tcp_hdr *,	\
31 		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
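
/*
 * Note: `data_off` here is the TCP data-offset field, so the shift by 4
 * extracts the L4 header length expressed in 32-bit words (e.g. the value 5
 * corresponds to a minimal 20-byte TCP header with no options).
 */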
32 
33 #define ETH_GSTRING_LEN	32
34 
35 #define ARRAY_SIZE(x) RTE_DIM(x)
36 
37 #define ENA_MIN_RING_DESC	128
38 
39 /*
40  * We should try to keep ENA_CLEANUP_BUF_SIZE lower than
41  * RTE_MEMPOOL_CACHE_MAX_SIZE, so it can fit in the mempool's local cache.
42  */
43 #define ENA_CLEANUP_BUF_SIZE	256
44 
45 #define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)
46 
47 struct ena_stats {
48 	char name[ETH_GSTRING_LEN];
49 	int stat_offset;
50 };
51 
52 #define ENA_STAT_ENTRY(stat, stat_type) { \
53 	.name = #stat, \
54 	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
55 }
56 
57 #define ENA_STAT_RX_ENTRY(stat) \
58 	ENA_STAT_ENTRY(stat, rx)
59 
60 #define ENA_STAT_TX_ENTRY(stat) \
61 	ENA_STAT_ENTRY(stat, tx)
62 
63 #define ENA_STAT_ENI_ENTRY(stat) \
64 	ENA_STAT_ENTRY(stat, eni)
65 
66 #define ENA_STAT_GLOBAL_ENTRY(stat) \
67 	ENA_STAT_ENTRY(stat, dev)
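
/*
 * As an illustration of the macros above, ENA_STAT_RX_ENTRY(cnt) expands to:
 *
 *   { .name = "cnt", .stat_offset = offsetof(struct ena_stats_rx, cnt) }
 *
 * which lets the xstats handlers look each counter up by its byte offset
 * into the per-ring statistics structure.
 */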
68 
69 /* Device arguments */
70 #define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
71 /* Timeout in seconds after which a single uncompleted Tx packet should be
72  * considered as missing.
73  */
74 #define ENA_DEVARG_MISS_TXC_TO "miss_txc_to"
75 /*
76  * Controls whether LLQ should be used (if available). Enabled by default.
77  * NOTE: Disabling LLQ is highly discouraged, as it may lead to a severe
78  * performance degradation on 6th generation AWS instances.
79  */
80 #define ENA_DEVARG_ENABLE_LLQ "enable_llq"
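
/*
 * The keys above are passed through the standard DPDK device arguments, e.g.
 * (the PCI address below is purely illustrative):
 *
 *   dpdk-testpmd -a 00:06.0,large_llq_hdr=1,miss_txc_to=5,enable_llq=1 -- -i
 */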
81 
82 /*
83  * Each rte_memzone should have a unique name.
84  * To satisfy this, count the number of allocations and append it to the name.
85  */
86 rte_atomic64_t ena_alloc_cnt;
87 
88 static const struct ena_stats ena_stats_global_strings[] = {
89 	ENA_STAT_GLOBAL_ENTRY(wd_expired),
90 	ENA_STAT_GLOBAL_ENTRY(dev_start),
91 	ENA_STAT_GLOBAL_ENTRY(dev_stop),
92 	ENA_STAT_GLOBAL_ENTRY(tx_drops),
93 };
94 
95 static const struct ena_stats ena_stats_eni_strings[] = {
96 	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
97 	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
98 	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
99 	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
100 	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
101 };
102 
103 static const struct ena_stats ena_stats_tx_strings[] = {
104 	ENA_STAT_TX_ENTRY(cnt),
105 	ENA_STAT_TX_ENTRY(bytes),
106 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
107 	ENA_STAT_TX_ENTRY(tx_poll),
108 	ENA_STAT_TX_ENTRY(doorbells),
109 	ENA_STAT_TX_ENTRY(bad_req_id),
110 	ENA_STAT_TX_ENTRY(available_desc),
111 	ENA_STAT_TX_ENTRY(missed_tx),
112 };
113 
114 static const struct ena_stats ena_stats_rx_strings[] = {
115 	ENA_STAT_RX_ENTRY(cnt),
116 	ENA_STAT_RX_ENTRY(bytes),
117 	ENA_STAT_RX_ENTRY(refill_partial),
118 	ENA_STAT_RX_ENTRY(l3_csum_bad),
119 	ENA_STAT_RX_ENTRY(l4_csum_bad),
120 	ENA_STAT_RX_ENTRY(l4_csum_good),
121 	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
122 	ENA_STAT_RX_ENTRY(bad_desc_num),
123 	ENA_STAT_RX_ENTRY(bad_req_id),
124 };
125 
126 #define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
127 #define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
128 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
129 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
130 
131 #define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
132 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
133 			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
134 			RTE_ETH_TX_OFFLOAD_TCP_TSO)
135 #define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
136 		       RTE_MBUF_F_TX_IP_CKSUM |\
137 		       RTE_MBUF_F_TX_TCP_SEG)
138 
139 /** Vendor ID used by Amazon devices */
140 #define PCI_VENDOR_ID_AMAZON 0x1D0F
141 /** Amazon devices */
142 #define PCI_DEVICE_ID_ENA_VF		0xEC20
143 #define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21
144 
145 #define	ENA_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_L4_MASK |         \
146 	RTE_MBUF_F_TX_IPV6 |            \
147 	RTE_MBUF_F_TX_IPV4 |            \
148 	RTE_MBUF_F_TX_IP_CKSUM |        \
149 	RTE_MBUF_F_TX_TCP_SEG)
150 
151 #define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
152 	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
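
/*
 * The XOR above complements the supported mask within
 * RTE_MBUF_F_TX_OFFLOAD_MASK: an mbuf carrying any Tx offload flag outside
 * ENA_TX_OFFLOAD_MASK (e.g. VLAN insertion) matches
 * ENA_TX_OFFLOAD_NOTSUP_MASK and can be rejected in the Tx prepare path.
 */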
153 
154 /** HW-specific offload capabilities. */
155 /* IPv4 checksum offload. */
156 #define ENA_L3_IPV4_CSUM		0x0001
157 /* TCP/UDP checksum offload for IPv4 packets. */
158 #define ENA_L4_IPV4_CSUM		0x0002
159 /* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
160 #define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
161 /* TCP/UDP checksum offload for IPv6 packets. */
162 #define ENA_L4_IPV6_CSUM		0x0008
163 /* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
164 #define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
165 /* TSO support for IPv4 packets. */
166 #define ENA_IPV4_TSO			0x0020
167 
168 /* Device supports setting RSS hash. */
169 #define ENA_RX_RSS_HASH			0x0040
170 
171 static const struct rte_pci_id pci_id_ena_map[] = {
172 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
173 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
174 	{ .device_id = 0 },
175 };
176 
177 static struct ena_aenq_handlers aenq_handlers;
178 
179 static int ena_device_init(struct ena_adapter *adapter,
180 			   struct rte_pci_device *pdev,
181 			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
182 static int ena_dev_configure(struct rte_eth_dev *dev);
183 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
184 	struct ena_tx_buffer *tx_info,
185 	struct rte_mbuf *mbuf,
186 	void **push_header,
187 	uint16_t *header_len);
188 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
189 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt);
190 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
191 				  uint16_t nb_pkts);
192 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
193 		uint16_t nb_pkts);
194 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
195 			      uint16_t nb_desc, unsigned int socket_id,
196 			      const struct rte_eth_txconf *tx_conf);
197 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
198 			      uint16_t nb_desc, unsigned int socket_id,
199 			      const struct rte_eth_rxconf *rx_conf,
200 			      struct rte_mempool *mp);
201 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
202 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
203 				    struct ena_com_rx_buf_info *ena_bufs,
204 				    uint32_t descs,
205 				    uint16_t *next_to_clean,
206 				    uint8_t offset);
207 static uint16_t eth_ena_recv_pkts(void *rx_queue,
208 				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
209 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
210 				  struct rte_mbuf *mbuf, uint16_t id);
211 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
212 static void ena_init_rings(struct ena_adapter *adapter,
213 			   bool disable_meta_caching);
214 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
215 static int ena_start(struct rte_eth_dev *dev);
216 static int ena_stop(struct rte_eth_dev *dev);
217 static int ena_close(struct rte_eth_dev *dev);
218 static int ena_dev_reset(struct rte_eth_dev *dev);
219 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
220 static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
221 static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
222 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
223 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
224 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
225 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
226 static int ena_link_update(struct rte_eth_dev *dev,
227 			   int wait_to_complete);
228 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
229 static void ena_queue_stop(struct ena_ring *ring);
230 static void ena_queue_stop_all(struct rte_eth_dev *dev,
231 			      enum ena_ring_type ring_type);
232 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
233 static int ena_queue_start_all(struct rte_eth_dev *dev,
234 			       enum ena_ring_type ring_type);
235 static void ena_stats_restart(struct rte_eth_dev *dev);
236 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
237 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
238 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
239 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
240 static int ena_infos_get(struct rte_eth_dev *dev,
241 			 struct rte_eth_dev_info *dev_info);
242 static void ena_interrupt_handler_rte(void *cb_arg);
243 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
244 static void ena_destroy_device(struct rte_eth_dev *eth_dev);
245 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
246 static int ena_xstats_get_names(struct rte_eth_dev *dev,
247 				struct rte_eth_xstat_name *xstats_names,
248 				unsigned int n);
249 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
250 				      const uint64_t *ids,
251 				      struct rte_eth_xstat_name *xstats_names,
252 				      unsigned int size);
253 static int ena_xstats_get(struct rte_eth_dev *dev,
254 			  struct rte_eth_xstat *stats,
255 			  unsigned int n);
256 static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
257 				const uint64_t *ids,
258 				uint64_t *values,
259 				unsigned int n);
260 static int ena_process_bool_devarg(const char *key,
261 				   const char *value,
262 				   void *opaque);
263 static int ena_parse_devargs(struct ena_adapter *adapter,
264 			     struct rte_devargs *devargs);
265 static int ena_copy_eni_stats(struct ena_adapter *adapter,
266 			      struct ena_stats_eni *stats);
267 static int ena_setup_rx_intr(struct rte_eth_dev *dev);
268 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
269 				    uint16_t queue_id);
270 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
271 				     uint16_t queue_id);
272 static int ena_configure_aenq(struct ena_adapter *adapter);
273 static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
274 				 const void *peer);
275 
276 static const struct eth_dev_ops ena_dev_ops = {
277 	.dev_configure          = ena_dev_configure,
278 	.dev_infos_get          = ena_infos_get,
279 	.rx_queue_setup         = ena_rx_queue_setup,
280 	.tx_queue_setup         = ena_tx_queue_setup,
281 	.dev_start              = ena_start,
282 	.dev_stop               = ena_stop,
283 	.link_update            = ena_link_update,
284 	.stats_get              = ena_stats_get,
285 	.xstats_get_names       = ena_xstats_get_names,
286 	.xstats_get_names_by_id = ena_xstats_get_names_by_id,
287 	.xstats_get             = ena_xstats_get,
288 	.xstats_get_by_id       = ena_xstats_get_by_id,
289 	.mtu_set                = ena_mtu_set,
290 	.rx_queue_release       = ena_rx_queue_release,
291 	.tx_queue_release       = ena_tx_queue_release,
292 	.dev_close              = ena_close,
293 	.dev_reset              = ena_dev_reset,
294 	.reta_update            = ena_rss_reta_update,
295 	.reta_query             = ena_rss_reta_query,
296 	.rx_queue_intr_enable   = ena_rx_queue_intr_enable,
297 	.rx_queue_intr_disable  = ena_rx_queue_intr_disable,
298 	.rss_hash_update        = ena_rss_hash_update,
299 	.rss_hash_conf_get      = ena_rss_hash_conf_get,
300 	.tx_done_cleanup        = ena_tx_cleanup,
301 };
302 
303 /*********************************************************************
304  *  Multi-Process communication bits
305  *********************************************************************/
306 /* rte_mp IPC message name */
307 #define ENA_MP_NAME	"net_ena_mp"
308 /* Request timeout in seconds */
309 #define ENA_MP_REQ_TMO	5
310 
311 /** Proxy request type */
312 enum ena_mp_req {
313 	ENA_MP_DEV_STATS_GET,
314 	ENA_MP_ENI_STATS_GET,
315 	ENA_MP_MTU_SET,
316 	ENA_MP_IND_TBL_GET,
317 	ENA_MP_IND_TBL_SET
318 };
319 
320 /** Proxy message body. Shared between requests and responses. */
321 struct ena_mp_body {
322 	/* Message type */
323 	enum ena_mp_req type;
324 	int port_id;
325 	/* Processing result. Set in replies. 0 if message succeeded, negative
326 	 * error code otherwise.
327 	 */
328 	int result;
329 	union {
330 		int mtu; /* For ENA_MP_MTU_SET */
331 	} args;
332 };
333 
334 /**
335  * Initialize IPC message.
336  *
337  * @param[out] msg
338  *   Pointer to the message to initialize.
339  * @param[in] type
340  *   Message type.
341  * @param[in] port_id
342  *   Port ID of target device.
343  *
344  */
345 static void
346 mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id)
347 {
348 	struct ena_mp_body *body = (struct ena_mp_body *)&msg->param;
349 
350 	memset(msg, 0, sizeof(*msg));
351 	strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name));
352 	msg->len_param = sizeof(*body);
353 	body->type = type;
354 	body->port_id = port_id;
355 }
356 
357 /*********************************************************************
358  *  Multi-Process communication PMD API
359  *********************************************************************/
360 /**
361  * Define proxy request descriptor
362  *
363  * Used to define all structures and functions required for proxying a given
364  * function to the primary process, including the code used to prepare the
365  * request and process the response.
366  *
367  * @param[in] f
368  *   Name of the function to proxy
369  * @param[in] t
370  *   Message type to use
371  * @param[in] prep
372  *   Body of a function to prepare the request in the form of a statement
373  *   expression. It is passed all the original function arguments along with two
374  *   extra ones:
375  *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
376  *   - struct ena_mp_body *req - body of a request to prepare.
377  * @param[in] proc
378  *   Body of a function to process the response in the form of a statement
379  *   expression. It is passed all the original function arguments along with two
380  *   extra ones:
381  *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
382  *   - struct ena_mp_body *rsp - body of a response to process.
383  * @param ...
384  *   Proxied function's arguments
385  *
386  * @note Inside prep and proc any parameters which aren't used should be marked
387  *       as such (with ENA_TOUCH or __rte_unused).
388  */
389 #define ENA_PROXY_DESC(f, t, prep, proc, ...)			\
390 	static const enum ena_mp_req mp_type_ ## f =  t;	\
391 	static const char *mp_name_ ## f = #t;			\
392 	static void mp_prep_ ## f(struct ena_adapter *adapter,	\
393 				  struct ena_mp_body *req,	\
394 				  __VA_ARGS__)			\
395 	{							\
396 		prep;						\
397 	}							\
398 	static void mp_proc_ ## f(struct ena_adapter *adapter,	\
399 				  struct ena_mp_body *rsp,	\
400 				  __VA_ARGS__)			\
401 	{							\
402 		proc;						\
403 	}
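
/*
 * As an illustration, the descriptor defined further below:
 *
 *   ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET, ..., int mtu);
 *
 * generates mp_type_ena_com_set_dev_mtu, mp_name_ena_com_set_dev_mtu and the
 * helpers mp_prep_ena_com_set_dev_mtu()/mp_proc_ena_com_set_dev_mtu(), which
 * ENA_PROXY() below stitches together by token-pasting on the function name.
 */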
404 
405 /**
406  * Proxy wrapper for calling primary functions in a secondary process.
407  *
408  * Depending on whether called in primary or secondary process, calls the
409  * @p func directly or proxies the call to the primary process via rte_mp IPC.
410  * This macro requires a proxy request descriptor to be defined for @p func
411  * using ENA_PROXY_DESC() macro.
412  *
413  * @param[in/out] a
414  *   Device PMD data. Used for sending the message and sharing message results
415  *   between primary and secondary.
416  * @param[in] f
417  *   Function to proxy.
418  * @param ...
419  *   Arguments of @p func.
420  *
421  * @return
422  *   - 0: Processing succeeded and response handler was called.
423  *   - -EPERM: IPC is unavailable on this platform. This means only primary
424  *             process may call the proxied function.
425  *   - -EIO:   IPC returned error on request send. Inspect rte_errno for the
426  *             detailed error code.
427  *   - Negative error code from the proxied function.
428  *
429  * @note This mechanism is geared towards control-path tasks. Avoid calling it
430  *       in fast-path unless unbounded delays are allowed. This is due to the IPC
431  *       mechanism itself (socket based).
432  * @note Due to IPC parameter size limitations the proxy logic shares call
433  *       results through the struct ena_adapter shared memory. This makes the
434  *       proxy mechanism strictly single-threaded. Therefore be sure to make all
435  *       calls to the same proxied function under the same lock.
436  */
437 #define ENA_PROXY(a, f, ...)						\
438 ({									\
439 	struct ena_adapter *_a = (a);					\
440 	struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO };		\
441 	struct ena_mp_body *req, *rsp;					\
442 	struct rte_mp_reply mp_rep;					\
443 	struct rte_mp_msg mp_req;					\
444 	int ret;							\
445 									\
446 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {		\
447 		ret = f(__VA_ARGS__);					\
448 	} else {							\
449 		/* Prepare and send request */				\
450 		req = (struct ena_mp_body *)&mp_req.param;		\
451 		mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \
452 		mp_prep_ ## f(_a, req, ## __VA_ARGS__);			\
453 									\
454 		ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);	\
455 		if (likely(!ret)) {					\
456 			RTE_ASSERT(mp_rep.nb_received == 1);		\
457 			rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \
458 			ret = rsp->result;				\
459 			if (ret == 0) {					\
460 				mp_proc_##f(_a, rsp, ## __VA_ARGS__);	\
461 			} else {					\
462 				PMD_DRV_LOG(ERR,			\
463 					    "%s returned error: %d\n",	\
464 					    mp_name_ ## f, rsp->result);\
465 			}						\
466 			free(mp_rep.msgs);				\
467 		} else if (rte_errno == ENOTSUP) {			\
468 			PMD_DRV_LOG(ERR,				\
469 				    "No IPC, can't proxy to primary\n");\
470 			ret = -rte_errno;				\
471 		} else {						\
472 			PMD_DRV_LOG(ERR, "Request %s failed: %s\n",	\
473 				    mp_name_ ## f,			\
474 				    rte_strerror(rte_errno));		\
475 			ret = -EIO;					\
476 		}							\
477 	}								\
478 	ret;								\
479 })
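
/*
 * Usage mirrors a direct call to the proxied function, e.g. (as done in
 * ena_mtu_set() below):
 *
 *   rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
 *
 * In the primary process this collapses to ena_com_set_dev_mtu(ena_dev, mtu);
 * in a secondary process it sends an ENA_MP_MTU_SET request over IPC instead.
 */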
480 
481 /*********************************************************************
482  *  Multi-Process communication request descriptors
483  *********************************************************************/
484 
485 ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET,
486 ({
487 	ENA_TOUCH(adapter);
488 	ENA_TOUCH(req);
489 	ENA_TOUCH(ena_dev);
490 	ENA_TOUCH(stats);
491 }),
492 ({
493 	ENA_TOUCH(rsp);
494 	ENA_TOUCH(ena_dev);
495 	if (stats != &adapter->basic_stats)
496 		rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats));
497 }),
498 	struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats);
499 
500 ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET,
501 ({
502 	ENA_TOUCH(adapter);
503 	ENA_TOUCH(req);
504 	ENA_TOUCH(ena_dev);
505 	ENA_TOUCH(stats);
506 }),
507 ({
508 	ENA_TOUCH(rsp);
509 	ENA_TOUCH(ena_dev);
510 	if (stats != (struct ena_admin_eni_stats *)&adapter->eni_stats)
511 		rte_memcpy(stats, &adapter->eni_stats, sizeof(*stats));
512 }),
513 	struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats);
514 
515 ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
516 ({
517 	ENA_TOUCH(adapter);
518 	ENA_TOUCH(ena_dev);
519 	req->args.mtu = mtu;
520 }),
521 ({
522 	ENA_TOUCH(adapter);
523 	ENA_TOUCH(rsp);
524 	ENA_TOUCH(ena_dev);
525 	ENA_TOUCH(mtu);
526 }),
527 	struct ena_com_dev *ena_dev, int mtu);
528 
529 ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET,
530 ({
531 	ENA_TOUCH(adapter);
532 	ENA_TOUCH(req);
533 	ENA_TOUCH(ena_dev);
534 }),
535 ({
536 	ENA_TOUCH(adapter);
537 	ENA_TOUCH(rsp);
538 	ENA_TOUCH(ena_dev);
539 }),
540 	struct ena_com_dev *ena_dev);
541 
542 ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET,
543 ({
544 	ENA_TOUCH(adapter);
545 	ENA_TOUCH(req);
546 	ENA_TOUCH(ena_dev);
547 	ENA_TOUCH(ind_tbl);
548 }),
549 ({
550 	ENA_TOUCH(rsp);
551 	ENA_TOUCH(ena_dev);
552 	if (ind_tbl != adapter->indirect_table)
553 		rte_memcpy(ind_tbl, adapter->indirect_table,
554 			   sizeof(adapter->indirect_table));
555 }),
556 	struct ena_com_dev *ena_dev, u32 *ind_tbl);
557 
558 static inline void ena_trigger_reset(struct ena_adapter *adapter,
559 				     enum ena_regs_reset_reason_types reason)
560 {
561 	if (likely(!adapter->trigger_reset)) {
562 		adapter->reset_reason = reason;
563 		adapter->trigger_reset = true;
564 	}
565 }
566 
567 static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
568 				       struct rte_mbuf *mbuf,
569 				       struct ena_com_rx_ctx *ena_rx_ctx,
570 				       bool fill_hash)
571 {
572 	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
573 	uint64_t ol_flags = 0;
574 	uint32_t packet_type = 0;
575 
576 	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
577 		packet_type |= RTE_PTYPE_L4_TCP;
578 	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
579 		packet_type |= RTE_PTYPE_L4_UDP;
580 
581 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
582 		packet_type |= RTE_PTYPE_L3_IPV4;
583 		if (unlikely(ena_rx_ctx->l3_csum_err)) {
584 			++rx_stats->l3_csum_bad;
585 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
586 		} else {
587 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
588 		}
589 	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
590 		packet_type |= RTE_PTYPE_L3_IPV6;
591 	}
592 
593 	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
594 		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
595 	} else {
596 		if (unlikely(ena_rx_ctx->l4_csum_err)) {
597 			++rx_stats->l4_csum_bad;
598 			/*
599 			 * For the L4 Rx checksum offload the HW may indicate
600 			 * a bad checksum even though it's valid. Because of
601 			 * that, we set the UNKNOWN flag to let the app
602 			 * re-verify the checksum.
603 			 */
604 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
605 		} else {
606 			++rx_stats->l4_csum_good;
607 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
608 		}
609 	}
610 
611 	if (fill_hash &&
612 	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
613 		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
614 		mbuf->hash.rss = ena_rx_ctx->hash;
615 	}
616 
617 	mbuf->ol_flags = ol_flags;
618 	mbuf->packet_type = packet_type;
619 }
620 
621 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
622 				       struct ena_com_tx_ctx *ena_tx_ctx,
623 				       uint64_t queue_offloads,
624 				       bool disable_meta_caching)
625 {
626 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
627 
628 	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
629 	    (queue_offloads & QUEUE_OFFLOADS)) {
630 		/* check if TSO is required */
631 		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
632 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
633 			ena_tx_ctx->tso_enable = true;
634 
635 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
636 		}
637 
638 		/* check if L3 checksum is needed */
639 		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
640 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
641 			ena_tx_ctx->l3_csum_enable = true;
642 
643 		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
644 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
645 			/* For IPv6 packets, DF always needs to be true. */
646 			ena_tx_ctx->df = 1;
647 		} else {
648 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
649 
650 			/* set don't fragment (DF) flag */
651 			if (mbuf->packet_type &
652 				(RTE_PTYPE_L4_NONFRAG
653 				 | RTE_PTYPE_INNER_L4_NONFRAG))
654 				ena_tx_ctx->df = 1;
655 		}
656 
657 		/* check if L4 checksum is needed */
658 		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
659 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
660 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
661 			ena_tx_ctx->l4_csum_enable = true;
662 		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
663 				RTE_MBUF_F_TX_UDP_CKSUM) &&
664 				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
665 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
666 			ena_tx_ctx->l4_csum_enable = true;
667 		} else {
668 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
669 			ena_tx_ctx->l4_csum_enable = false;
670 		}
671 
672 		ena_meta->mss = mbuf->tso_segsz;
673 		ena_meta->l3_hdr_len = mbuf->l3_len;
674 		ena_meta->l3_hdr_offset = mbuf->l2_len;
675 
676 		ena_tx_ctx->meta_valid = true;
677 	} else if (disable_meta_caching) {
678 		memset(ena_meta, 0, sizeof(*ena_meta));
679 		ena_tx_ctx->meta_valid = true;
680 	} else {
681 		ena_tx_ctx->meta_valid = false;
682 	}
683 }
684 
685 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
686 {
687 	struct ena_tx_buffer *tx_info = NULL;
688 
689 	if (likely(req_id < tx_ring->ring_size)) {
690 		tx_info = &tx_ring->tx_buffer_info[req_id];
691 		if (likely(tx_info->mbuf))
692 			return 0;
693 	}
694 
695 	if (tx_info)
696 		PMD_TX_LOG(ERR, "tx_info doesn't have a valid mbuf. queue %d:%d req_id %u\n",
697 			tx_ring->port_id, tx_ring->id, req_id);
698 	else
699 		PMD_TX_LOG(ERR, "Invalid req_id: %hu in queue %d:%d\n",
700 			req_id, tx_ring->port_id, tx_ring->id);
701 
702 	/* Trigger device reset */
703 	++tx_ring->tx_stats.bad_req_id;
704 	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
705 	return -EFAULT;
706 }
707 
708 static void ena_config_host_info(struct ena_com_dev *ena_dev)
709 {
710 	struct ena_admin_host_info *host_info;
711 	int rc;
712 
713 	/* Allocate only the host info */
714 	rc = ena_com_allocate_host_info(ena_dev);
715 	if (rc) {
716 		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
717 		return;
718 	}
719 
720 	host_info = ena_dev->host_attr.host_info;
721 
722 	host_info->os_type = ENA_ADMIN_OS_DPDK;
723 	host_info->kernel_ver = RTE_VERSION;
724 	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
725 		sizeof(host_info->kernel_ver_str));
726 	host_info->os_dist = RTE_VERSION;
727 	strlcpy((char *)host_info->os_dist_str, rte_version(),
728 		sizeof(host_info->os_dist_str));
729 	host_info->driver_version =
730 		(DRV_MODULE_VER_MAJOR) |
731 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
732 		(DRV_MODULE_VER_SUBMINOR <<
733 			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
734 	host_info->num_cpus = rte_lcore_count();
735 
736 	host_info->driver_supported_features =
737 		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
738 		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
739 
740 	rc = ena_com_set_host_attributes(ena_dev);
741 	if (rc) {
742 		if (rc == -ENA_COM_UNSUPPORTED)
743 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
744 		else
745 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
746 
747 		goto err;
748 	}
749 
750 	return;
751 
752 err:
753 	ena_com_delete_host_info(ena_dev);
754 }
755 
756 /* This function calculates the number of xstats based on the current config */
757 static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
758 {
759 	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
760 		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
761 		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
762 }
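
/*
 * For instance, with 4 Rx and 4 Tx queues configured this yields
 * 4 (global) + 5 (eni) + 4 * 8 (tx) + 4 * 9 (rx) = 77 xstats, matching the
 * string tables defined at the top of this file.
 */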
763 
764 static void ena_config_debug_area(struct ena_adapter *adapter)
765 {
766 	u32 debug_area_size;
767 	int rc, ss_count;
768 
769 	ss_count = ena_xstats_calc_num(adapter->edev_data);
770 
771 	/* Allocate 32 bytes for each string and 64 bits for each value. */
772 	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
773 
774 	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
775 	if (rc) {
776 		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
777 		return;
778 	}
779 
780 	rc = ena_com_set_host_attributes(&adapter->ena_dev);
781 	if (rc) {
782 		if (rc == -ENA_COM_UNSUPPORTED)
783 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
784 		else
785 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
786 
787 		goto err;
788 	}
789 
790 	return;
791 err:
792 	ena_com_delete_debug_area(&adapter->ena_dev);
793 }
794 
795 static int ena_close(struct rte_eth_dev *dev)
796 {
797 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
798 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
799 	struct ena_adapter *adapter = dev->data->dev_private;
800 	int ret = 0;
801 
802 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
803 		return 0;
804 
805 	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
806 		ret = ena_stop(dev);
807 	adapter->state = ENA_ADAPTER_STATE_CLOSED;
808 
809 	ena_rx_queue_release_all(dev);
810 	ena_tx_queue_release_all(dev);
811 
812 	rte_free(adapter->drv_stats);
813 	adapter->drv_stats = NULL;
814 
815 	rte_intr_disable(intr_handle);
816 	rte_intr_callback_unregister(intr_handle,
817 				     ena_interrupt_handler_rte,
818 				     dev);
819 
820 	/*
821 	 * MAC is not allocated dynamically. Setting it to NULL should prevent
822 	 * the resource from being released in rte_eth_dev_release_port().
823 	 */
824 	dev->data->mac_addrs = NULL;
825 
826 	return ret;
827 }
828 
829 static int
830 ena_dev_reset(struct rte_eth_dev *dev)
831 {
832 	int rc = 0;
833 
834 	/* Cannot release memory in secondary process */
835 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
836 		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
837 		return -EPERM;
838 	}
839 
840 	ena_destroy_device(dev);
841 	rc = eth_ena_dev_init(dev);
842 	if (rc)
843 		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
844 
845 	return rc;
846 }
847 
848 static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
849 {
850 	int nb_queues = dev->data->nb_rx_queues;
851 	int i;
852 
853 	for (i = 0; i < nb_queues; i++)
854 		ena_rx_queue_release(dev, i);
855 }
856 
857 static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
858 {
859 	int nb_queues = dev->data->nb_tx_queues;
860 	int i;
861 
862 	for (i = 0; i < nb_queues; i++)
863 		ena_tx_queue_release(dev, i);
864 }
865 
866 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
867 {
868 	struct ena_ring *ring = dev->data->rx_queues[qid];
869 
870 	/* Free ring resources */
871 	rte_free(ring->rx_buffer_info);
872 	ring->rx_buffer_info = NULL;
873 
874 	rte_free(ring->rx_refill_buffer);
875 	ring->rx_refill_buffer = NULL;
876 
877 	rte_free(ring->empty_rx_reqs);
878 	ring->empty_rx_reqs = NULL;
879 
880 	ring->configured = 0;
881 
882 	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
883 		ring->port_id, ring->id);
884 }
885 
886 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
887 {
888 	struct ena_ring *ring = dev->data->tx_queues[qid];
889 
890 	/* Free ring resources */
891 	rte_free(ring->push_buf_intermediate_buf);
892 
893 	rte_free(ring->tx_buffer_info);
894 
895 	rte_free(ring->empty_tx_reqs);
896 
897 	ring->empty_tx_reqs = NULL;
898 	ring->tx_buffer_info = NULL;
899 	ring->push_buf_intermediate_buf = NULL;
900 
901 	ring->configured = 0;
902 
903 	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
904 		ring->port_id, ring->id);
905 }
906 
907 static void ena_rx_queue_release_bufs(struct ena_ring *ring)
908 {
909 	unsigned int i;
910 
911 	for (i = 0; i < ring->ring_size; ++i) {
912 		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
913 		if (rx_info->mbuf) {
914 			rte_mbuf_raw_free(rx_info->mbuf);
915 			rx_info->mbuf = NULL;
916 		}
917 	}
918 }
919 
920 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
921 {
922 	unsigned int i;
923 
924 	for (i = 0; i < ring->ring_size; ++i) {
925 		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
926 
927 		if (tx_buf->mbuf) {
928 			rte_pktmbuf_free(tx_buf->mbuf);
929 			tx_buf->mbuf = NULL;
930 		}
931 	}
932 }
933 
934 static int ena_link_update(struct rte_eth_dev *dev,
935 			   __rte_unused int wait_to_complete)
936 {
937 	struct rte_eth_link *link = &dev->data->dev_link;
938 	struct ena_adapter *adapter = dev->data->dev_private;
939 
940 	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
941 	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
942 	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
943 
944 	return 0;
945 }
946 
947 static int ena_queue_start_all(struct rte_eth_dev *dev,
948 			       enum ena_ring_type ring_type)
949 {
950 	struct ena_adapter *adapter = dev->data->dev_private;
951 	struct ena_ring *queues = NULL;
952 	int nb_queues;
953 	int i = 0;
954 	int rc = 0;
955 
956 	if (ring_type == ENA_RING_TYPE_RX) {
957 		queues = adapter->rx_ring;
958 		nb_queues = dev->data->nb_rx_queues;
959 	} else {
960 		queues = adapter->tx_ring;
961 		nb_queues = dev->data->nb_tx_queues;
962 	}
963 	for (i = 0; i < nb_queues; i++) {
964 		if (queues[i].configured) {
965 			if (ring_type == ENA_RING_TYPE_RX) {
966 				ena_assert_msg(
967 					dev->data->rx_queues[i] == &queues[i],
968 					"Inconsistent state of Rx queues\n");
969 			} else {
970 				ena_assert_msg(
971 					dev->data->tx_queues[i] == &queues[i],
972 					"Inconsistent state of Tx queues\n");
973 			}
974 
975 			rc = ena_queue_start(dev, &queues[i]);
976 
977 			if (rc) {
978 				PMD_INIT_LOG(ERR,
979 					"Failed to start queue[%d] of type(%d)\n",
980 					i, ring_type);
981 				goto err;
982 			}
983 		}
984 	}
985 
986 	return 0;
987 
988 err:
989 	while (i--)
990 		if (queues[i].configured)
991 			ena_queue_stop(&queues[i]);
992 
993 	return rc;
994 }
995 
996 static int
997 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
998 		       bool use_large_llq_hdr)
999 {
1000 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
1001 	struct ena_com_dev *ena_dev = ctx->ena_dev;
1002 	uint32_t max_tx_queue_size;
1003 	uint32_t max_rx_queue_size;
1004 
1005 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1006 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1007 			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
1008 		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
1009 			max_queue_ext->max_rx_sq_depth);
1010 		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
1011 
1012 		if (ena_dev->tx_mem_queue_type ==
1013 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1014 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1015 				llq->max_llq_depth);
1016 		} else {
1017 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1018 				max_queue_ext->max_tx_sq_depth);
1019 		}
1020 
1021 		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1022 			max_queue_ext->max_per_packet_rx_descs);
1023 		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1024 			max_queue_ext->max_per_packet_tx_descs);
1025 	} else {
1026 		struct ena_admin_queue_feature_desc *max_queues =
1027 			&ctx->get_feat_ctx->max_queues;
1028 		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
1029 			max_queues->max_sq_depth);
1030 		max_tx_queue_size = max_queues->max_cq_depth;
1031 
1032 		if (ena_dev->tx_mem_queue_type ==
1033 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1034 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1035 				llq->max_llq_depth);
1036 		} else {
1037 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1038 				max_queues->max_sq_depth);
1039 		}
1040 
1041 		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1042 			max_queues->max_packet_rx_descs);
1043 		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1044 			max_queues->max_packet_tx_descs);
1045 	}
1046 
1047 	/* Round down to the nearest power of 2 */
1048 	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
1049 	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);
1050 
1051 	if (use_large_llq_hdr) {
1052 		if ((llq->entry_size_ctrl_supported &
1053 		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
1054 		    (ena_dev->tx_mem_queue_type ==
1055 		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
1056 			max_tx_queue_size /= 2;
1057 			PMD_INIT_LOG(INFO,
1058 				"Forcing large headers and decreasing maximum Tx queue size to %d\n",
1059 				max_tx_queue_size);
1060 		} else {
1061 			PMD_INIT_LOG(ERR,
1062 				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
1063 		}
1064 	}
1065 
1066 	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
1067 		PMD_INIT_LOG(ERR, "Invalid queue size\n");
1068 		return -EFAULT;
1069 	}
1070 
1071 	ctx->max_tx_queue_size = max_tx_queue_size;
1072 	ctx->max_rx_queue_size = max_rx_queue_size;
1073 
1074 	return 0;
1075 }
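
/*
 * Worked example: if the device reports a maximum Tx depth of 1000,
 * rte_align32prevpow2() rounds it down to 512; forcing large LLQ headers
 * (when supported and LLQ is in use) then halves it to 256.
 */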
1076 
1077 static void ena_stats_restart(struct rte_eth_dev *dev)
1078 {
1079 	struct ena_adapter *adapter = dev->data->dev_private;
1080 
1081 	rte_atomic64_init(&adapter->drv_stats->ierrors);
1082 	rte_atomic64_init(&adapter->drv_stats->oerrors);
1083 	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
1084 	adapter->drv_stats->rx_drops = 0;
1085 }
1086 
1087 static int ena_stats_get(struct rte_eth_dev *dev,
1088 			  struct rte_eth_stats *stats)
1089 {
1090 	struct ena_admin_basic_stats ena_stats;
1091 	struct ena_adapter *adapter = dev->data->dev_private;
1092 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1093 	int rc;
1094 	int i;
1095 	int max_rings_stats;
1096 
1097 	memset(&ena_stats, 0, sizeof(ena_stats));
1098 
1099 	rte_spinlock_lock(&adapter->admin_lock);
1100 	rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev,
1101 		       &ena_stats);
1102 	rte_spinlock_unlock(&adapter->admin_lock);
1103 	if (unlikely(rc)) {
1104 		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
1105 		return rc;
1106 	}
1107 
1108 	/* Set of basic statistics from ENA */
1109 	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
1110 					  ena_stats.rx_pkts_low);
1111 	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
1112 					  ena_stats.tx_pkts_low);
1113 	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
1114 					ena_stats.rx_bytes_low);
1115 	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
1116 					ena_stats.tx_bytes_low);
1117 
1118 	/* Driver related stats */
1119 	stats->imissed = adapter->drv_stats->rx_drops;
1120 	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
1121 	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
1122 	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
1123 
1124 	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
1125 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
1126 	for (i = 0; i < max_rings_stats; ++i) {
1127 		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;
1128 
1129 		stats->q_ibytes[i] = rx_stats->bytes;
1130 		stats->q_ipackets[i] = rx_stats->cnt;
1131 		stats->q_errors[i] = rx_stats->bad_desc_num +
1132 			rx_stats->bad_req_id;
1133 	}
1134 
1135 	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
1136 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
1137 	for (i = 0; i < max_rings_stats; ++i) {
1138 		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;
1139 
1140 		stats->q_obytes[i] = tx_stats->bytes;
1141 		stats->q_opackets[i] = tx_stats->cnt;
1142 	}
1143 
1144 	return 0;
1145 }
1146 
1147 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1148 {
1149 	struct ena_adapter *adapter;
1150 	struct ena_com_dev *ena_dev;
1151 	int rc = 0;
1152 
1153 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
1154 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
1155 	adapter = dev->data->dev_private;
1156 
1157 	ena_dev = &adapter->ena_dev;
1158 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
1159 
1160 	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
1161 	if (rc)
1162 		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
1163 	else
1164 		PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu);
1165 
1166 	return rc;
1167 }
1168 
1169 static int ena_start(struct rte_eth_dev *dev)
1170 {
1171 	struct ena_adapter *adapter = dev->data->dev_private;
1172 	uint64_t ticks;
1173 	int rc = 0;
1174 	uint16_t i;
1175 
1176 	/* Cannot allocate memory in secondary process */
1177 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1178 		PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n");
1179 		return -EPERM;
1180 	}
1181 
1182 	rc = ena_setup_rx_intr(dev);
1183 	if (rc)
1184 		return rc;
1185 
1186 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
1187 	if (rc)
1188 		return rc;
1189 
1190 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
1191 	if (rc)
1192 		goto err_start_tx;
1193 
1194 	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
1195 		rc = ena_rss_configure(adapter);
1196 		if (rc)
1197 			goto err_rss_init;
1198 	}
1199 
1200 	ena_stats_restart(dev);
1201 
1202 	adapter->timestamp_wd = rte_get_timer_cycles();
1203 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1204 
1205 	ticks = rte_get_timer_hz();
1206 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1207 			ena_timer_wd_callback, dev);
1208 
1209 	++adapter->dev_stats.dev_start;
1210 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
1211 
1212 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1213 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1214 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1215 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
1216 
1217 	return 0;
1218 
1219 err_rss_init:
1220 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
1221 err_start_tx:
1222 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1223 	return rc;
1224 }
1225 
1226 static int ena_stop(struct rte_eth_dev *dev)
1227 {
1228 	struct ena_adapter *adapter = dev->data->dev_private;
1229 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1230 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1231 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1232 	uint16_t i;
1233 	int rc;
1234 
1235 	/* Cannot free memory in secondary process */
1236 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1237 		PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n");
1238 		return -EPERM;
1239 	}
1240 
1241 	rte_timer_stop_sync(&adapter->timer_wd);
1242 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
1243 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1244 
1245 	if (adapter->trigger_reset) {
1246 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1247 		if (rc)
1248 			PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
1249 	}
1250 
1251 	rte_intr_disable(intr_handle);
1252 
1253 	rte_intr_efd_disable(intr_handle);
1254 
1255 	/* Cleanup vector list */
1256 	rte_intr_vec_list_free(intr_handle);
1257 
1258 	rte_intr_enable(intr_handle);
1259 
1260 	++adapter->dev_stats.dev_stop;
1261 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1262 	dev->data->dev_started = 0;
1263 
1264 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1265 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1266 	for (i = 0; i < dev->data->nb_tx_queues; i++)
1267 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1268 
1269 	return 0;
1270 }
1271 
1272 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
1273 {
1274 	struct ena_adapter *adapter = ring->adapter;
1275 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1276 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1277 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1278 	struct ena_com_create_io_ctx ctx =
1279 		/* policy set to _HOST just to satisfy icc compiler */
1280 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1281 		  0, 0, 0, 0, 0 };
1282 	uint16_t ena_qid;
1283 	unsigned int i;
1284 	int rc;
1285 
1286 	ctx.msix_vector = -1;
1287 	if (ring->type == ENA_RING_TYPE_TX) {
1288 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1289 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1290 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1291 		for (i = 0; i < ring->ring_size; i++)
1292 			ring->empty_tx_reqs[i] = i;
1293 	} else {
1294 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1295 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1296 		if (rte_intr_dp_is_en(intr_handle))
1297 			ctx.msix_vector =
1298 				rte_intr_vec_list_index_get(intr_handle,
1299 								   ring->id);
1300 
1301 		for (i = 0; i < ring->ring_size; i++)
1302 			ring->empty_rx_reqs[i] = i;
1303 	}
1304 	ctx.queue_size = ring->ring_size;
1305 	ctx.qid = ena_qid;
1306 	ctx.numa_node = ring->numa_socket_id;
1307 
1308 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1309 	if (rc) {
1310 		PMD_DRV_LOG(ERR,
1311 			"Failed to create IO queue[%d] (qid:%d), rc: %d\n",
1312 			ring->id, ena_qid, rc);
1313 		return rc;
1314 	}
1315 
1316 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1317 				     &ring->ena_com_io_sq,
1318 				     &ring->ena_com_io_cq);
1319 	if (rc) {
1320 		PMD_DRV_LOG(ERR,
1321 			"Failed to get IO queue[%d] handlers, rc: %d\n",
1322 			ring->id, rc);
1323 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1324 		return rc;
1325 	}
1326 
1327 	if (ring->type == ENA_RING_TYPE_TX)
1328 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1329 
1330 	/* Start with Rx interrupts being masked. */
1331 	if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle))
1332 		ena_rx_queue_intr_disable(dev, ring->id);
1333 
1334 	return 0;
1335 }
1336 
1337 static void ena_queue_stop(struct ena_ring *ring)
1338 {
1339 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1340 
1341 	if (ring->type == ENA_RING_TYPE_RX) {
1342 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
1343 		ena_rx_queue_release_bufs(ring);
1344 	} else {
1345 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
1346 		ena_tx_queue_release_bufs(ring);
1347 	}
1348 }
1349 
1350 static void ena_queue_stop_all(struct rte_eth_dev *dev,
1351 			      enum ena_ring_type ring_type)
1352 {
1353 	struct ena_adapter *adapter = dev->data->dev_private;
1354 	struct ena_ring *queues = NULL;
1355 	uint16_t nb_queues, i;
1356 
1357 	if (ring_type == ENA_RING_TYPE_RX) {
1358 		queues = adapter->rx_ring;
1359 		nb_queues = dev->data->nb_rx_queues;
1360 	} else {
1361 		queues = adapter->tx_ring;
1362 		nb_queues = dev->data->nb_tx_queues;
1363 	}
1364 
1365 	for (i = 0; i < nb_queues; ++i)
1366 		if (queues[i].configured)
1367 			ena_queue_stop(&queues[i]);
1368 }
1369 
1370 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
1371 {
1372 	int rc, bufs_num;
1373 
1374 	ena_assert_msg(ring->configured == 1,
1375 		       "Trying to start unconfigured queue\n");
1376 
1377 	rc = ena_create_io_queue(dev, ring);
1378 	if (rc) {
1379 		PMD_INIT_LOG(ERR, "Failed to create IO queue\n");
1380 		return rc;
1381 	}
1382 
1383 	ring->next_to_clean = 0;
1384 	ring->next_to_use = 0;
1385 
1386 	if (ring->type == ENA_RING_TYPE_TX) {
1387 		ring->tx_stats.available_desc =
1388 			ena_com_free_q_entries(ring->ena_com_io_sq);
1389 		return 0;
1390 	}
1391 
1392 	bufs_num = ring->ring_size - 1;
1393 	rc = ena_populate_rx_queue(ring, bufs_num);
1394 	if (rc != bufs_num) {
1395 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
1396 					 ENA_IO_RXQ_IDX(ring->id));
1397 		PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n");
1398 		return ENA_COM_FAULT;
1399 	}
1400 	/* Flush the per-core RX buffer pool caches, as the buffers can be used
1401 	 * on other cores as well.
1402 	 */
1403 	rte_mempool_cache_flush(NULL, ring->mb_pool);
1404 
1405 	return 0;
1406 }
1407 
1408 static int ena_tx_queue_setup(struct rte_eth_dev *dev,
1409 			      uint16_t queue_idx,
1410 			      uint16_t nb_desc,
1411 			      unsigned int socket_id,
1412 			      const struct rte_eth_txconf *tx_conf)
1413 {
1414 	struct ena_ring *txq = NULL;
1415 	struct ena_adapter *adapter = dev->data->dev_private;
1416 	unsigned int i;
1417 	uint16_t dyn_thresh;
1418 
1419 	txq = &adapter->tx_ring[queue_idx];
1420 
1421 	if (txq->configured) {
1422 		PMD_DRV_LOG(CRIT,
1423 			"API violation. Queue[%d] is already configured\n",
1424 			queue_idx);
1425 		return ENA_COM_FAULT;
1426 	}
1427 
1428 	if (!rte_is_power_of_2(nb_desc)) {
1429 		PMD_DRV_LOG(ERR,
1430 			"Unsupported size of Tx queue: %d is not a power of 2.\n",
1431 			nb_desc);
1432 		return -EINVAL;
1433 	}
1434 
1435 	if (nb_desc > adapter->max_tx_ring_size) {
1436 		PMD_DRV_LOG(ERR,
1437 			"Unsupported size of Tx queue (max size: %d)\n",
1438 			adapter->max_tx_ring_size);
1439 		return -EINVAL;
1440 	}
1441 
1442 	txq->port_id = dev->data->port_id;
1443 	txq->next_to_clean = 0;
1444 	txq->next_to_use = 0;
1445 	txq->ring_size = nb_desc;
1446 	txq->size_mask = nb_desc - 1;
1447 	txq->numa_socket_id = socket_id;
1448 	txq->pkts_without_db = false;
1449 	txq->last_cleanup_ticks = 0;
1450 
1451 	txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
1452 		sizeof(struct ena_tx_buffer) * txq->ring_size,
1453 		RTE_CACHE_LINE_SIZE,
1454 		socket_id);
1455 	if (!txq->tx_buffer_info) {
1456 		PMD_DRV_LOG(ERR,
1457 			"Failed to allocate memory for Tx buffer info\n");
1458 		return -ENOMEM;
1459 	}
1460 
1461 	txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
1462 		sizeof(uint16_t) * txq->ring_size,
1463 		RTE_CACHE_LINE_SIZE,
1464 		socket_id);
1465 	if (!txq->empty_tx_reqs) {
1466 		PMD_DRV_LOG(ERR,
1467 			"Failed to allocate memory for empty Tx requests\n");
1468 		rte_free(txq->tx_buffer_info);
1469 		return -ENOMEM;
1470 	}
1471 
1472 	txq->push_buf_intermediate_buf =
1473 		rte_zmalloc_socket("txq->push_buf_intermediate_buf",
1474 			txq->tx_max_header_size,
1475 			RTE_CACHE_LINE_SIZE,
1476 			socket_id);
1477 	if (!txq->push_buf_intermediate_buf) {
1478 		PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
1479 		rte_free(txq->tx_buffer_info);
1480 		rte_free(txq->empty_tx_reqs);
1481 		return -ENOMEM;
1482 	}
1483 
1484 	for (i = 0; i < txq->ring_size; i++)
1485 		txq->empty_tx_reqs[i] = i;
1486 
1487 	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1488 
1489 	/* Check if caller provided the Tx cleanup threshold value. */
1490 	if (tx_conf->tx_free_thresh != 0) {
1491 		txq->tx_free_thresh = tx_conf->tx_free_thresh;
1492 	} else {
1493 		dyn_thresh = txq->ring_size -
1494 			txq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1495 		txq->tx_free_thresh = RTE_MAX(dyn_thresh,
1496 			txq->ring_size - ENA_REFILL_THRESH_PACKET);
1497 	}
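
	/*
	 * A sketch of the default path, assuming ENA_REFILL_THRESH_DIVIDER is 8
	 * and ENA_REFILL_THRESH_PACKET is 256 (see ena_ethdev.h): for a ring of
	 * 1024 descriptors, dyn_thresh = 1024 - 1024 / 8 = 896 and
	 * tx_free_thresh = RTE_MAX(896, 1024 - 256) = 896.
	 */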
1498 
1499 	txq->missing_tx_completion_threshold =
1500 		RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);
1501 
1502 	/* Store pointer to this queue in upper layer */
1503 	txq->configured = 1;
1504 	dev->data->tx_queues[queue_idx] = txq;
1505 
1506 	return 0;
1507 }
1508 
1509 static int ena_rx_queue_setup(struct rte_eth_dev *dev,
1510 			      uint16_t queue_idx,
1511 			      uint16_t nb_desc,
1512 			      unsigned int socket_id,
1513 			      const struct rte_eth_rxconf *rx_conf,
1514 			      struct rte_mempool *mp)
1515 {
1516 	struct ena_adapter *adapter = dev->data->dev_private;
1517 	struct ena_ring *rxq = NULL;
1518 	size_t buffer_size;
1519 	int i;
1520 	uint16_t dyn_thresh;
1521 
1522 	rxq = &adapter->rx_ring[queue_idx];
1523 	if (rxq->configured) {
1524 		PMD_DRV_LOG(CRIT,
1525 			"API violation. Queue[%d] is already configured\n",
1526 			queue_idx);
1527 		return ENA_COM_FAULT;
1528 	}
1529 
1530 	if (!rte_is_power_of_2(nb_desc)) {
1531 		PMD_DRV_LOG(ERR,
1532 			"Unsupported size of Rx queue: %d is not a power of 2.\n",
1533 			nb_desc);
1534 		return -EINVAL;
1535 	}
1536 
1537 	if (nb_desc > adapter->max_rx_ring_size) {
1538 		PMD_DRV_LOG(ERR,
1539 			"Unsupported size of Rx queue (max size: %d)\n",
1540 			adapter->max_rx_ring_size);
1541 		return -EINVAL;
1542 	}
1543 
1544 	/* ENA doesn't support buffers smaller than 1400 bytes */
1545 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
1546 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
1547 		PMD_DRV_LOG(ERR,
1548 			"Unsupported size of Rx buffer: %zu (min size: %d)\n",
1549 			buffer_size, ENA_RX_BUF_MIN_SIZE);
1550 		return -EINVAL;
1551 	}
1552 
1553 	rxq->port_id = dev->data->port_id;
1554 	rxq->next_to_clean = 0;
1555 	rxq->next_to_use = 0;
1556 	rxq->ring_size = nb_desc;
1557 	rxq->size_mask = nb_desc - 1;
1558 	rxq->numa_socket_id = socket_id;
1559 	rxq->mb_pool = mp;
1560 
1561 	rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
1562 		sizeof(struct ena_rx_buffer) * nb_desc,
1563 		RTE_CACHE_LINE_SIZE,
1564 		socket_id);
1565 	if (!rxq->rx_buffer_info) {
1566 		PMD_DRV_LOG(ERR,
1567 			"Failed to allocate memory for Rx buffer info\n");
1568 		return -ENOMEM;
1569 	}
1570 
1571 	rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
1572 		sizeof(struct rte_mbuf *) * nb_desc,
1573 		RTE_CACHE_LINE_SIZE,
1574 		socket_id);
1575 	if (!rxq->rx_refill_buffer) {
1576 		PMD_DRV_LOG(ERR,
1577 			"Failed to allocate memory for Rx refill buffer\n");
1578 		rte_free(rxq->rx_buffer_info);
1579 		rxq->rx_buffer_info = NULL;
1580 		return -ENOMEM;
1581 	}
1582 
1583 	rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
1584 		sizeof(uint16_t) * nb_desc,
1585 		RTE_CACHE_LINE_SIZE,
1586 		socket_id);
1587 	if (!rxq->empty_rx_reqs) {
1588 		PMD_DRV_LOG(ERR,
1589 			"Failed to allocate memory for empty Rx requests\n");
1590 		rte_free(rxq->rx_buffer_info);
1591 		rxq->rx_buffer_info = NULL;
1592 		rte_free(rxq->rx_refill_buffer);
1593 		rxq->rx_refill_buffer = NULL;
1594 		return -ENOMEM;
1595 	}
1596 
1597 	for (i = 0; i < nb_desc; i++)
1598 		rxq->empty_rx_reqs[i] = i;
1599 
1600 	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1601 
1602 	if (rx_conf->rx_free_thresh != 0) {
1603 		rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1604 	} else {
1605 		dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1606 		rxq->rx_free_thresh = RTE_MIN(dyn_thresh,
1607 			(uint16_t)(ENA_REFILL_THRESH_PACKET));
1608 	}
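
	/*
	 * Under the same assumed constants as on the Tx side, a ring of 1024
	 * descriptors gets rx_free_thresh = RTE_MIN(1024 / 8, 256) = 128.
	 */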
1609 
1610 	/* Store pointer to this queue in upper layer */
1611 	rxq->configured = 1;
1612 	dev->data->rx_queues[queue_idx] = rxq;
1613 
1614 	return 0;
1615 }
1616 
1617 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
1618 				  struct rte_mbuf *mbuf, uint16_t id)
1619 {
1620 	struct ena_com_buf ebuf;
1621 	int rc;
1622 
1623 	/* prepare physical address for DMA transaction */
1624 	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
1625 	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
1626 
1627 	/* pass resource to device */
1628 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
1629 	if (unlikely(rc != 0))
1630 		PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");
1631 
1632 	return rc;
1633 }
1634 
1635 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
1636 {
1637 	unsigned int i;
1638 	int rc;
1639 	uint16_t next_to_use = rxq->next_to_use;
1640 	uint16_t req_id;
1641 #ifdef RTE_ETHDEV_DEBUG_RX
1642 	uint16_t in_use;
1643 #endif
1644 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
1645 
1646 	if (unlikely(!count))
1647 		return 0;
1648 
1649 #ifdef RTE_ETHDEV_DEBUG_RX
1650 	in_use = rxq->ring_size - 1 -
1651 		ena_com_free_q_entries(rxq->ena_com_io_sq);
1652 	if (unlikely((in_use + count) >= rxq->ring_size))
1653 		PMD_RX_LOG(ERR, "Bad Rx ring state\n");
1654 #endif
1655 
1656 	/* get resources for incoming packets */
1657 	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
1658 	if (unlikely(rc < 0)) {
1659 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
1660 		++rxq->rx_stats.mbuf_alloc_fail;
1661 		PMD_RX_LOG(DEBUG, "There are not enough free buffers\n");
1662 		return 0;
1663 	}
1664 
1665 	for (i = 0; i < count; i++) {
1666 		struct rte_mbuf *mbuf = mbufs[i];
1667 		struct ena_rx_buffer *rx_info;
1668 
1669 		if (likely((i + 4) < count))
1670 			rte_prefetch0(mbufs[i + 4]);
1671 
1672 		req_id = rxq->empty_rx_reqs[next_to_use];
1673 		rx_info = &rxq->rx_buffer_info[req_id];
1674 
1675 		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
1676 		if (unlikely(rc != 0))
1677 			break;
1678 
1679 		rx_info->mbuf = mbuf;
1680 		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
1681 	}
1682 
1683 	if (unlikely(i < count)) {
1684 		PMD_RX_LOG(WARNING,
1685 			"Refilled Rx queue[%d] with only %d/%d buffers\n",
1686 			rxq->id, i, count);
1687 		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
1688 		++rxq->rx_stats.refill_partial;
1689 	}
1690 
1691 	/* When we submitted free resources to device... */
1692 	if (likely(i > 0)) {
1693 		/* ...let HW know that it can fill buffers with data. */
1694 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
1695 
1696 		rxq->next_to_use = next_to_use;
1697 	}
1698 
1699 	return i;
1700 }
1701 
1702 static int ena_device_init(struct ena_adapter *adapter,
1703 			   struct rte_pci_device *pdev,
1704 			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
1705 {
1706 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1707 	uint32_t aenq_groups;
1708 	int rc;
1709 	bool readless_supported;
1710 
1711 	/* Initialize mmio registers */
1712 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
1713 	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to init MMIO read-less mode\n");
1715 		return rc;
1716 	}
1717 
	/* The PCIe configuration space revision ID indicates whether MMIO
	 * register read is disabled.
	 */
1721 	readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ);
1722 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1723 
1724 	/* reset device */
1725 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
1726 	if (rc) {
1727 		PMD_DRV_LOG(ERR, "Cannot reset device\n");
1728 		goto err_mmio_read_less;
1729 	}
1730 
1731 	/* check FW version */
1732 	rc = ena_com_validate_version(ena_dev);
1733 	if (rc) {
1734 		PMD_DRV_LOG(ERR, "Device version is too low\n");
1735 		goto err_mmio_read_less;
1736 	}
1737 
1738 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
1739 
1740 	/* ENA device administration layer init */
1741 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
1742 	if (rc) {
1743 		PMD_DRV_LOG(ERR,
1744 			"Cannot initialize ENA admin queue\n");
1745 		goto err_mmio_read_less;
1746 	}
1747 
	/* To enable the MSI-X interrupts the driver needs to know the number
	 * of queues, so it uses admin polling mode to retrieve this
	 * information.
1751 	 */
1752 	ena_com_set_admin_polling_mode(ena_dev, true);
1753 
1754 	ena_config_host_info(ena_dev);
1755 
1756 	/* Get Device Attributes and features */
1757 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
1758 	if (rc) {
1759 		PMD_DRV_LOG(ERR,
1760 			"Cannot get attribute for ENA device, rc: %d\n", rc);
1761 		goto err_admin_init;
1762 	}
1763 
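	/* Record which of the AENQ groups handled by the driver are actually
	 * supported by the device; they are enabled later when the AENQ is
	 * configured.
	 */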
1764 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1765 		      BIT(ENA_ADMIN_NOTIFICATION) |
1766 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1767 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1768 		      BIT(ENA_ADMIN_WARNING);
1769 
1770 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1771 
1772 	adapter->all_aenq_groups = aenq_groups;
1773 
1774 	return 0;
1775 
1776 err_admin_init:
1777 	ena_com_admin_destroy(ena_dev);
1778 
1779 err_mmio_read_less:
1780 	ena_com_mmio_reg_read_request_destroy(ena_dev);
1781 
1782 	return rc;
1783 }
1784 
1785 static void ena_interrupt_handler_rte(void *cb_arg)
1786 {
1787 	struct rte_eth_dev *dev = cb_arg;
1788 	struct ena_adapter *adapter = dev->data->dev_private;
1789 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1790 
1791 	ena_com_admin_q_comp_intr_handler(ena_dev);
1792 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1793 		ena_com_aenq_intr_handler(ena_dev, dev);
1794 }
1795 
1796 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
1797 {
1798 	if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)))
1799 		return;
1800 
1801 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
1802 		return;
1803 
1804 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
1805 	    adapter->keep_alive_timeout)) {
1806 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
1807 		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
1808 		++adapter->dev_stats.wd_expired;
1809 	}
1810 }
1811 
/* Check if the admin queue is still in a running state */
1813 static void check_for_admin_com_state(struct ena_adapter *adapter)
1814 {
1815 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
1816 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n");
1817 		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
1818 	}
1819 }
1820 
1821 static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
1822 					    struct ena_ring *tx_ring)
1823 {
1824 	struct ena_tx_buffer *tx_buf;
1825 	uint64_t timestamp;
1826 	uint64_t completion_delay;
1827 	uint32_t missed_tx = 0;
1828 	unsigned int i;
1829 	int rc = 0;
1830 
1831 	for (i = 0; i < tx_ring->ring_size; ++i) {
1832 		tx_buf = &tx_ring->tx_buffer_info[i];
1833 		timestamp = tx_buf->timestamp;
1834 
1835 		if (timestamp == 0)
1836 			continue;
1837 
1838 		completion_delay = rte_get_timer_cycles() - timestamp;
1839 		if (completion_delay > adapter->missing_tx_completion_to) {
1840 			if (unlikely(!tx_buf->print_once)) {
1841 				PMD_TX_LOG(WARNING,
1842 					"Found a Tx that wasn't completed on time, qid %d, index %d. "
1843 					"Missing Tx outstanding for %" PRIu64 " msecs.\n",
					tx_ring->id, i, completion_delay *
					1000 / rte_get_timer_hz());
1846 				tx_buf->print_once = true;
1847 			}
1848 			++missed_tx;
1849 		}
1850 	}
1851 
1852 	if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
1853 		PMD_DRV_LOG(ERR,
1854 			"The number of lost Tx completions is above the threshold (%d > %d). "
1855 			"Trigger the device reset.\n",
1856 			missed_tx,
1857 			tx_ring->missing_tx_completion_threshold);
1858 		adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
1859 		adapter->trigger_reset = true;
1860 		rc = -EIO;
1861 	}
1862 
1863 	tx_ring->tx_stats.missed_tx += missed_tx;
1864 
1865 	return rc;
1866 }
1867 
1868 static void check_for_tx_completions(struct ena_adapter *adapter)
1869 {
1870 	struct ena_ring *tx_ring;
1871 	uint64_t tx_cleanup_delay;
1872 	size_t qid;
1873 	int budget;
1874 	uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues;
1875 
1876 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
1877 		return;
1878 
1880 	budget = adapter->missing_tx_completion_budget;
1881 
1882 	qid = adapter->last_tx_comp_qid;
1883 	while (budget-- > 0) {
1884 		tx_ring = &adapter->tx_ring[qid];
1885 
1886 		/* Tx cleanup is called only by the burst function and can be
1887 		 * called dynamically by the application. Also cleanup is
1888 		 * limited by the threshold. To avoid false detection of the
1889 		 * missing HW Tx completion, get the delay since last cleanup
1890 		 * function was called.
1891 		 */
1892 		tx_cleanup_delay = rte_get_timer_cycles() -
1893 			tx_ring->last_cleanup_ticks;
1894 		if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay)
1895 			check_for_tx_completion_in_queue(adapter, tx_ring);
1896 		qid = (qid + 1) % nb_tx_queues;
1897 	}
1898 
1899 	adapter->last_tx_comp_qid = qid;
1900 }
1901 
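/* Watchdog timer callback: runs the keep-alive, admin queue and missing Tx
 * completion checks, and asks the application to reset the port when any of
 * them requests a device reset.
 */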
1902 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1903 				  void *arg)
1904 {
1905 	struct rte_eth_dev *dev = arg;
1906 	struct ena_adapter *adapter = dev->data->dev_private;
1907 
1908 	if (unlikely(adapter->trigger_reset))
1909 		return;
1910 
1911 	check_for_missing_keep_alive(adapter);
1912 	check_for_admin_com_state(adapter);
1913 	check_for_tx_completions(adapter);
1914 
1915 	if (unlikely(adapter->trigger_reset)) {
1916 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
1917 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1918 			NULL);
1919 	}
1920 }
1921 
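/* LLQ defaults: inline headers, multiple descriptors per entry and 2
 * descriptors before the header; the entry size is bumped to 256B only when
 * large LLQ headers were requested and the device supports them.
 */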
1922 static inline void
1923 set_default_llq_configurations(struct ena_llq_configurations *llq_config,
1924 			       struct ena_admin_feature_llq_desc *llq,
1925 			       bool use_large_llq_hdr)
1926 {
1927 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
1928 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
1929 	llq_config->llq_num_decs_before_header =
1930 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
1931 
1932 	if (use_large_llq_hdr &&
1933 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
1934 		llq_config->llq_ring_entry_size =
1935 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
1936 		llq_config->llq_ring_entry_size_value = 256;
1937 	} else {
1938 		llq_config->llq_ring_entry_size =
1939 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
1940 		llq_config->llq_ring_entry_size_value = 128;
1941 	}
1942 }
1943 
1944 static int
1945 ena_set_queues_placement_policy(struct ena_adapter *adapter,
1946 				struct ena_com_dev *ena_dev,
1947 				struct ena_admin_feature_llq_desc *llq,
1948 				struct ena_llq_configurations *llq_default_configurations)
1949 {
1950 	int rc;
1951 	u32 llq_feature_mask;
1952 
1953 	if (!adapter->enable_llq) {
1954 		PMD_DRV_LOG(WARNING,
1955 			"NOTE: LLQ has been disabled as per user's request. "
1956 			"This may lead to a huge performance degradation!\n");
1957 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1958 		return 0;
1959 	}
1960 
1961 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
1962 	if (!(ena_dev->supported_features & llq_feature_mask)) {
1963 		PMD_DRV_LOG(INFO,
1964 			"LLQ is not supported. Fallback to host mode policy.\n");
1965 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1966 		return 0;
1967 	}
1968 
1969 	if (adapter->dev_mem_base == NULL) {
1970 		PMD_DRV_LOG(ERR,
1971 			"LLQ is advertised as supported, but device doesn't expose mem bar\n");
1972 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1973 		return 0;
1974 	}
1975 
1976 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
1977 	if (unlikely(rc)) {
1978 		PMD_INIT_LOG(WARNING,
1979 			"Failed to config dev mode. Fallback to host mode policy.\n");
1980 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1981 		return 0;
1982 	}
1983 
1984 	/* Nothing to config, exit */
1985 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
1986 		return 0;
1987 
1988 	ena_dev->mem_bar = adapter->dev_mem_base;
1989 
1990 	return 0;
1991 }
1992 
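/* The number of usable IO queues is limited by the smallest of the Rx/Tx
 * SQ/CQ counts reported by the device (or its LLQ limit for Tx SQs) and the
 * driver's own ENA_MAX_NUM_IO_QUEUES cap.
 */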
1993 static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
1994 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
1995 {
1996 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
1997 
1998 	/* Regular queues capabilities */
1999 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2000 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2001 			&get_feat_ctx->max_queue_ext.max_queue_ext;
2002 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
2003 				    max_queue_ext->max_rx_cq_num);
2004 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2005 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2006 	} else {
2007 		struct ena_admin_queue_feature_desc *max_queues =
2008 			&get_feat_ctx->max_queues;
2009 		io_tx_sq_num = max_queues->max_sq_num;
2010 		io_tx_cq_num = max_queues->max_cq_num;
2011 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
2012 	}
2013 
2014 	/* In case of LLQ use the llq number in the get feature cmd */
2015 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2016 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2017 
2018 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
2019 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
2020 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
2021 
2022 	if (unlikely(max_num_io_queues == 0)) {
		PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n");
		return 0;
2025 	}
2026 
2027 	return max_num_io_queues;
2028 }
2029 
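/* Translate the device's offload capability bitmap into the PMD's internal
 * ENA_* offload flags; these are later exposed through the ethdev info
 * callbacks as RTE_ETH_*_OFFLOAD_* capabilities.
 */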
2030 static void
2031 ena_set_offloads(struct ena_offloads *offloads,
2032 		 struct ena_admin_feature_offload_desc *offload_desc)
2033 {
2034 	if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
2035 		offloads->tx_offloads |= ENA_IPV4_TSO;
2036 
2037 	/* Tx IPv4 checksum offloads */
2038 	if (offload_desc->tx &
2039 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
2040 		offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
2041 	if (offload_desc->tx &
2042 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
2043 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
2044 	if (offload_desc->tx &
2045 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
2046 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;
2047 
2048 	/* Tx IPv6 checksum offloads */
2049 	if (offload_desc->tx &
2050 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
2051 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
2052 	if (offload_desc->tx &
2053 	     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
2054 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;
2055 
2056 	/* Rx IPv4 checksum offloads */
2057 	if (offload_desc->rx_supported &
2058 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
2059 		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
2060 	if (offload_desc->rx_supported &
2061 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
2062 		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;
2063 
2064 	/* Rx IPv6 checksum offloads */
2065 	if (offload_desc->rx_supported &
2066 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
2067 		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;
2068 
2069 	if (offload_desc->rx_supported &
2070 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
2071 		offloads->rx_offloads |= ENA_RX_RSS_HASH;
2072 }
2073 
2074 static int ena_init_once(void)
2075 {
2076 	static bool init_done;
2077 
2078 	if (init_done)
2079 		return 0;
2080 
2081 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2082 		/* Init timer subsystem for the ENA timer service. */
2083 		rte_timer_subsystem_init();
2084 		/* Register handler for requests from secondary processes. */
2085 		rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle);
2086 	}
2087 
2088 	init_done = true;
2089 	return 0;
2090 }
2091 
2092 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
2093 {
2094 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
2095 	struct rte_pci_device *pci_dev;
2096 	struct rte_intr_handle *intr_handle;
2097 	struct ena_adapter *adapter = eth_dev->data->dev_private;
2098 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
2099 	struct ena_com_dev_get_features_ctx get_feat_ctx;
2100 	struct ena_llq_configurations llq_config;
2101 	const char *queue_type_str;
2102 	uint32_t max_num_io_queues;
2103 	int rc;
2104 	static int adapters_found;
2105 	bool disable_meta_caching;
2106 
2107 	eth_dev->dev_ops = &ena_dev_ops;
2108 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
2109 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
2110 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
2111 
2112 	rc = ena_init_once();
2113 	if (rc != 0)
2114 		return rc;
2115 
2116 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2117 		return 0;
2118 
2119 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2120 
2121 	memset(adapter, 0, sizeof(struct ena_adapter));
2122 	ena_dev = &adapter->ena_dev;
2123 
2124 	adapter->edev_data = eth_dev->data;
2125 
2126 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2127 
2128 	PMD_INIT_LOG(INFO, "Initializing " PCI_PRI_FMT "\n",
2129 		     pci_dev->addr.domain,
2130 		     pci_dev->addr.bus,
2131 		     pci_dev->addr.devid,
2132 		     pci_dev->addr.function);
2133 
2134 	intr_handle = pci_dev->intr_handle;
2135 
2136 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
2137 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
2138 
2139 	if (!adapter->regs) {
2140 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
2141 			     ENA_REGS_BAR);
2142 		return -ENXIO;
2143 	}
2144 
2145 	ena_dev->reg_bar = adapter->regs;
	/* Pass the device data pointer to ena_com, so it can be used by its
	 * platform helpers (for example, for memory allocations).
	 */
2149 	ena_dev->dmadev = eth_dev->data;
2150 
2151 	adapter->id_number = adapters_found;
2152 
2153 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
2154 		 adapter->id_number);
2155 
2156 	/* Assign default devargs values */
2157 	adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
2158 	adapter->enable_llq = true;
2159 	adapter->use_large_llq_hdr = false;
2160 
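	/* These defaults can be overridden via devargs on the EAL command
	 * line, for example (the PCI address is only illustrative):
	 *   -a 0000:00:05.0,enable_llq=1,large_llq_hdr=1,miss_txc_to=5
	 */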
2161 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
2162 	if (rc != 0) {
2163 		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
2164 		goto err;
2165 	}
2166 
2167 	/* device specific initialization routine */
2168 	rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
2169 	if (rc) {
2170 		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
2171 		goto err;
2172 	}
2173 
2174 	/* Check if device supports LSC */
2175 	if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
2176 		adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
2177 
2178 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
2179 		adapter->use_large_llq_hdr);
2180 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
2181 					     &get_feat_ctx.llq, &llq_config);
2182 	if (unlikely(rc)) {
2183 		PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
		goto err_device_destroy;
2185 	}
2186 
2187 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
2188 		queue_type_str = "Regular";
2189 	else
2190 		queue_type_str = "Low latency";
2191 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
2192 
2193 	calc_queue_ctx.ena_dev = ena_dev;
2194 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
2195 
2196 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
2197 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
2198 		adapter->use_large_llq_hdr);
2199 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
2200 		rc = -EFAULT;
2201 		goto err_device_destroy;
2202 	}
2203 
2204 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
2205 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
2206 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
2207 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
2208 	adapter->max_num_io_queues = max_num_io_queues;
2209 
2210 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2211 		disable_meta_caching =
2212 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
2213 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
2214 	} else {
2215 		disable_meta_caching = false;
2216 	}
2217 
2218 	/* prepare ring structures */
2219 	ena_init_rings(adapter, disable_meta_caching);
2220 
2221 	ena_config_debug_area(adapter);
2222 
2223 	/* Set max MTU for this device */
2224 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
2225 
2226 	ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload);
2227 
2228 	/* Copy MAC address and point DPDK to it */
2229 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
2230 	rte_ether_addr_copy((struct rte_ether_addr *)
2231 			get_feat_ctx.dev_attr.mac_addr,
2232 			(struct rte_ether_addr *)adapter->mac_addr);
2233 
2234 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2235 	if (unlikely(rc != 0)) {
2236 		PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
2237 		goto err_delete_debug_area;
2238 	}
2239 
2240 	adapter->drv_stats = rte_zmalloc("adapter stats",
2241 					 sizeof(*adapter->drv_stats),
2242 					 RTE_CACHE_LINE_SIZE);
2243 	if (!adapter->drv_stats) {
2244 		PMD_DRV_LOG(ERR,
2245 			"Failed to allocate memory for adapter statistics\n");
2246 		rc = -ENOMEM;
2247 		goto err_rss_destroy;
2248 	}
2249 
2250 	rte_spinlock_init(&adapter->admin_lock);
2251 
2252 	rte_intr_callback_register(intr_handle,
2253 				   ena_interrupt_handler_rte,
2254 				   eth_dev);
2255 	rte_intr_enable(intr_handle);
2256 	ena_com_set_admin_polling_mode(ena_dev, false);
2257 	ena_com_admin_aenq_enable(ena_dev);
2258 
2259 	rte_timer_init(&adapter->timer_wd);
2260 
2261 	adapters_found++;
2262 	adapter->state = ENA_ADAPTER_STATE_INIT;
2263 
2264 	return 0;
2265 
2266 err_rss_destroy:
2267 	ena_com_rss_destroy(ena_dev);
2268 err_delete_debug_area:
2269 	ena_com_delete_debug_area(ena_dev);
2270 
2271 err_device_destroy:
2272 	ena_com_delete_host_info(ena_dev);
2273 	ena_com_admin_destroy(ena_dev);
2274 
2275 err:
2276 	return rc;
2277 }
2278 
2279 static void ena_destroy_device(struct rte_eth_dev *eth_dev)
2280 {
2281 	struct ena_adapter *adapter = eth_dev->data->dev_private;
2282 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
2283 
2284 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
2285 		return;
2286 
2287 	ena_com_set_admin_running_state(ena_dev, false);
2288 
2289 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
2290 		ena_close(eth_dev);
2291 
2292 	ena_com_rss_destroy(ena_dev);
2293 
2294 	ena_com_delete_debug_area(ena_dev);
2295 	ena_com_delete_host_info(ena_dev);
2296 
2297 	ena_com_abort_admin_commands(ena_dev);
2298 	ena_com_wait_for_abort_completion(ena_dev);
2299 	ena_com_admin_destroy(ena_dev);
2300 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2301 
2302 	adapter->state = ENA_ADAPTER_STATE_FREE;
2303 }
2304 
2305 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
2306 {
2307 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2308 		return 0;
2309 
2310 	ena_destroy_device(eth_dev);
2311 
2312 	return 0;
2313 }
2314 
2315 static int ena_dev_configure(struct rte_eth_dev *dev)
2316 {
2317 	struct ena_adapter *adapter = dev->data->dev_private;
2318 	int rc;
2319 
2320 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
2321 
2322 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
2323 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2324 	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2325 
2326 	/* Scattered Rx cannot be turned off in the HW, so this capability must
2327 	 * be forced.
2328 	 */
2329 	dev->data->scattered_rx = 1;
2330 
2331 	adapter->last_tx_comp_qid = 0;
2332 
2333 	adapter->missing_tx_completion_budget =
2334 		RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
2335 
	/* To avoid declaring a spurious Tx completion timeout when the
	 * application does not call the Tx cleanup function often enough, use
	 * half of the missing completion timeout as the per-queue cleanup
	 * stall limit. If a queue really accumulates missing Tx completions,
	 * they will still be detected sooner or later.
2341 	 */
2342 	adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
2343 
2344 	rc = ena_configure_aenq(adapter);
2345 
2346 	return rc;
2347 }
2348 
2349 static void ena_init_rings(struct ena_adapter *adapter,
2350 			   bool disable_meta_caching)
2351 {
2352 	size_t i;
2353 
2354 	for (i = 0; i < adapter->max_num_io_queues; i++) {
2355 		struct ena_ring *ring = &adapter->tx_ring[i];
2356 
2357 		ring->configured = 0;
2358 		ring->type = ENA_RING_TYPE_TX;
2359 		ring->adapter = adapter;
2360 		ring->id = i;
2361 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
2362 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
2363 		ring->sgl_size = adapter->max_tx_sgl_size;
2364 		ring->disable_meta_caching = disable_meta_caching;
2365 	}
2366 
2367 	for (i = 0; i < adapter->max_num_io_queues; i++) {
2368 		struct ena_ring *ring = &adapter->rx_ring[i];
2369 
2370 		ring->configured = 0;
2371 		ring->type = ENA_RING_TYPE_RX;
2372 		ring->adapter = adapter;
2373 		ring->id = i;
2374 		ring->sgl_size = adapter->max_rx_sgl_size;
2375 	}
2376 }
2377 
2378 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
2379 {
2380 	uint64_t port_offloads = 0;
2381 
2382 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
2383 		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
2384 
2385 	if (adapter->offloads.rx_offloads &
2386 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
2387 		port_offloads |=
2388 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
2389 
2390 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
2391 		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2392 
2393 	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
2394 
2395 	return port_offloads;
2396 }
2397 
2398 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
2399 {
2400 	uint64_t port_offloads = 0;
2401 
2402 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
2403 		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
2404 
2405 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
2406 		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
2407 	if (adapter->offloads.tx_offloads &
2408 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
2409 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
2410 		port_offloads |=
2411 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
2412 
2413 	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2414 
2415 	port_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2416 
2417 	return port_offloads;
2418 }
2419 
2420 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
2421 {
2422 	RTE_SET_USED(adapter);
2423 
2424 	return 0;
2425 }
2426 
2427 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
2428 {
2429 	uint64_t queue_offloads = 0;
2430 	RTE_SET_USED(adapter);
2431 
2432 	queue_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2433 
2434 	return queue_offloads;
2435 }
2436 
2437 static int ena_infos_get(struct rte_eth_dev *dev,
2438 			  struct rte_eth_dev_info *dev_info)
2439 {
2440 	struct ena_adapter *adapter;
2441 	struct ena_com_dev *ena_dev;
2442 
2443 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2444 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2445 	adapter = dev->data->dev_private;
2446 
2447 	ena_dev = &adapter->ena_dev;
2448 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
2449 
2450 	dev_info->speed_capa =
2451 			RTE_ETH_LINK_SPEED_1G   |
2452 			RTE_ETH_LINK_SPEED_2_5G |
2453 			RTE_ETH_LINK_SPEED_5G   |
2454 			RTE_ETH_LINK_SPEED_10G  |
2455 			RTE_ETH_LINK_SPEED_25G  |
2456 			RTE_ETH_LINK_SPEED_40G  |
2457 			RTE_ETH_LINK_SPEED_50G  |
2458 			RTE_ETH_LINK_SPEED_100G;
2459 
2460 	/* Inform framework about available features */
2461 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
2462 	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
2463 	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
2464 	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
2465 
2466 	dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
2467 	dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
2468 
2469 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
2470 	dev_info->max_rx_pktlen  = adapter->max_mtu + RTE_ETHER_HDR_LEN +
2471 		RTE_ETHER_CRC_LEN;
2472 	dev_info->min_mtu = ENA_MIN_MTU;
2473 	dev_info->max_mtu = adapter->max_mtu;
2474 	dev_info->max_mac_addrs = 1;
2475 
2476 	dev_info->max_rx_queues = adapter->max_num_io_queues;
2477 	dev_info->max_tx_queues = adapter->max_num_io_queues;
2478 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
2479 
2480 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
2481 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2482 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2483 					adapter->max_rx_sgl_size);
2484 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2485 					adapter->max_rx_sgl_size);
2486 
2487 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
2488 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2489 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2490 					adapter->max_tx_sgl_size);
2491 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2492 					adapter->max_tx_sgl_size);
2493 
2494 	dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE;
2495 	dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE;
2496 
2497 	dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE;
2498 
2499 	return 0;
2500 }
2501 
2502 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
2503 {
2504 	mbuf->data_len = len;
2505 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2506 	mbuf->refcnt = 1;
2507 	mbuf->next = NULL;
2508 }
2509 
2510 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
2511 				    struct ena_com_rx_buf_info *ena_bufs,
2512 				    uint32_t descs,
2513 				    uint16_t *next_to_clean,
2514 				    uint8_t offset)
2515 {
2516 	struct rte_mbuf *mbuf;
2517 	struct rte_mbuf *mbuf_head;
2518 	struct ena_rx_buffer *rx_info;
2519 	int rc;
2520 	uint16_t ntc, len, req_id, buf = 0;
2521 
2522 	if (unlikely(descs == 0))
2523 		return NULL;
2524 
2525 	ntc = *next_to_clean;
2526 
2527 	len = ena_bufs[buf].len;
2528 	req_id = ena_bufs[buf].req_id;
2529 
2530 	rx_info = &rx_ring->rx_buffer_info[req_id];
2531 
2532 	mbuf = rx_info->mbuf;
2533 	RTE_ASSERT(mbuf != NULL);
2534 
2535 	ena_init_rx_mbuf(mbuf, len);
2536 
2537 	/* Fill the mbuf head with the data specific for 1st segment. */
2538 	mbuf_head = mbuf;
2539 	mbuf_head->nb_segs = descs;
2540 	mbuf_head->port = rx_ring->port_id;
2541 	mbuf_head->pkt_len = len;
2542 	mbuf_head->data_off += offset;
2543 
2544 	rx_info->mbuf = NULL;
2545 	rx_ring->empty_rx_reqs[ntc] = req_id;
2546 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
2547 
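	/* Attach the remaining descriptors as chained segments; zero-length
	 * descriptors carry no data and are simply returned to the device.
	 */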
2548 	while (--descs) {
2549 		++buf;
2550 		len = ena_bufs[buf].len;
2551 		req_id = ena_bufs[buf].req_id;
2552 
2553 		rx_info = &rx_ring->rx_buffer_info[req_id];
2554 		RTE_ASSERT(rx_info->mbuf != NULL);
2555 
2556 		if (unlikely(len == 0)) {
2557 			/*
			 * Some devices can pass a descriptor with length 0.
			 * To avoid confusion, the PMD simply puts such a
			 * descriptor back, as it was never used. That way an
			 * extra mbuf allocation is avoided.
2562 			 */
2563 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
2564 				rx_info->mbuf, req_id);
2565 			if (unlikely(rc != 0)) {
2566 				/* Free the mbuf in case of an error. */
2567 				rte_mbuf_raw_free(rx_info->mbuf);
2568 			} else {
2569 				/*
2570 				 * If there was no error, just exit the loop as
2571 				 * 0 length descriptor is always the last one.
2572 				 */
2573 				break;
2574 			}
2575 		} else {
2576 			/* Create an mbuf chain. */
2577 			mbuf->next = rx_info->mbuf;
2578 			mbuf = mbuf->next;
2579 
2580 			ena_init_rx_mbuf(mbuf, len);
2581 			mbuf_head->pkt_len += len;
2582 		}
2583 
2584 		/*
2585 		 * Mark the descriptor as depleted and perform necessary
2586 		 * cleanup.
2587 		 * This code will execute in two cases:
2588 		 *  1. Descriptor len was greater than 0 - normal situation.
2589 		 *  2. Descriptor len was 0 and we failed to add the descriptor
2590 		 *     to the device. In that situation, we should try to add
2591 		 *     the mbuf again in the populate routine and mark the
2592 		 *     descriptor as used up by the device.
2593 		 */
2594 		rx_info->mbuf = NULL;
2595 		rx_ring->empty_rx_reqs[ntc] = req_id;
2596 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
2597 	}
2598 
2599 	*next_to_clean = ntc;
2600 
2601 	return mbuf_head;
2602 }
2603 
2604 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
2605 				  uint16_t nb_pkts)
2606 {
2607 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
2608 	unsigned int free_queue_entries;
2609 	uint16_t next_to_clean = rx_ring->next_to_clean;
2610 	uint16_t descs_in_use;
2611 	struct rte_mbuf *mbuf;
2612 	uint16_t completed;
2613 	struct ena_com_rx_ctx ena_rx_ctx;
2614 	int i, rc = 0;
2615 	bool fill_hash;
2616 
2617 #ifdef RTE_ETHDEV_DEBUG_RX
2618 	/* Check adapter state */
2619 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
2620 		PMD_RX_LOG(ALERT,
2621 			"Trying to receive pkts while device is NOT running\n");
2622 		return 0;
2623 	}
2624 #endif
2625 
2626 	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
2627 
2628 	descs_in_use = rx_ring->ring_size -
2629 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
2630 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
2631 
2632 	for (completed = 0; completed < nb_pkts; completed++) {
2633 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
2634 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
2635 		ena_rx_ctx.descs = 0;
2636 		ena_rx_ctx.pkt_offset = 0;
2637 		/* receive packet context */
2638 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
2639 				    rx_ring->ena_com_io_sq,
2640 				    &ena_rx_ctx);
2641 		if (unlikely(rc)) {
2642 			PMD_RX_LOG(ERR,
2643 				"Failed to get the packet from the device, rc: %d\n",
2644 				rc);
2645 			if (rc == ENA_COM_NO_SPACE) {
2646 				++rx_ring->rx_stats.bad_desc_num;
2647 				ena_trigger_reset(rx_ring->adapter,
2648 					ENA_REGS_RESET_TOO_MANY_RX_DESCS);
2649 			} else {
2650 				++rx_ring->rx_stats.bad_req_id;
2651 				ena_trigger_reset(rx_ring->adapter,
2652 					ENA_REGS_RESET_INV_RX_REQ_ID);
2653 			}
2654 			return 0;
2655 		}
2656 
2657 		mbuf = ena_rx_mbuf(rx_ring,
2658 			ena_rx_ctx.ena_bufs,
2659 			ena_rx_ctx.descs,
2660 			&next_to_clean,
2661 			ena_rx_ctx.pkt_offset);
2662 		if (unlikely(mbuf == NULL)) {
2663 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2664 				rx_ring->empty_rx_reqs[next_to_clean] =
2665 					rx_ring->ena_bufs[i].req_id;
2666 				next_to_clean = ENA_IDX_NEXT_MASKED(
2667 					next_to_clean, rx_ring->size_mask);
2668 			}
2669 			break;
2670 		}
2671 
2672 		/* fill mbuf attributes if any */
2673 		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
2674 
2675 		if (unlikely(mbuf->ol_flags &
2676 				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))
2677 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
2678 
2679 		rx_pkts[completed] = mbuf;
2680 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
2681 	}
2682 
2683 	rx_ring->rx_stats.cnt += completed;
2684 	rx_ring->next_to_clean = next_to_clean;
2685 
2686 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
2687 
2688 	/* Burst refill to save doorbells, memory barriers, const interval */
2689 	if (free_queue_entries >= rx_ring->rx_free_thresh) {
2690 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
2691 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2692 	}
2693 
2694 	return completed;
2695 }
2696 
2697 static uint16_t
2698 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2699 		uint16_t nb_pkts)
2700 {
2701 	int32_t ret;
2702 	uint32_t i;
2703 	struct rte_mbuf *m;
2704 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2705 	struct ena_adapter *adapter = tx_ring->adapter;
2706 	struct rte_ipv4_hdr *ip_hdr;
2707 	uint64_t ol_flags;
2708 	uint64_t l4_csum_flag;
2709 	uint64_t dev_offload_capa;
2710 	uint16_t frag_field;
2711 	bool need_pseudo_csum;
2712 
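	/* For each mbuf: validate the requested offloads against the device
	 * capabilities and, where the HW supports only partial L4 checksum,
	 * compute the pseudo-header checksum in software.
	 */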
2713 	dev_offload_capa = adapter->offloads.tx_offloads;
2714 	for (i = 0; i != nb_pkts; i++) {
2715 		m = tx_pkts[i];
2716 		ol_flags = m->ol_flags;
2717 
2718 		/* Check if any offload flag was set */
2719 		if (ol_flags == 0)
2720 			continue;
2721 
2722 		l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
2723 		/* SCTP checksum offload is not supported by the ENA. */
2724 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
2725 		    l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
			PMD_TX_LOG(DEBUG,
				"mbuf[%" PRIu32 "] has unsupported offload flags set: 0x%" PRIx64 "\n",
				i, ol_flags);
2729 			rte_errno = ENOTSUP;
2730 			return i;
2731 		}
2732 
2733 		if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
2734 		    !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
2735 		      m->nb_segs == tx_ring->sgl_size &&
2736 		      m->data_len < tx_ring->tx_max_header_size))) {
2737 			PMD_TX_LOG(DEBUG,
2738 				"mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
2739 				i, m->nb_segs);
2740 			rte_errno = EINVAL;
2741 			return i;
2742 		}
2743 
2744 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2745 		/* Check if requested offload is also enabled for the queue */
2746 		if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2747 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
2748 		    (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
2749 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
2750 		    (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
2751 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
			PMD_TX_LOG(DEBUG,
				"mbuf[%" PRIu32 "]: requested offloads 0x%" PRIx64 " are not enabled for queue[%u]\n",
				i, ol_flags, tx_ring->id);
2755 			rte_errno = EINVAL;
2756 			return i;
2757 		}
2758 
2759 		/* The caller is obligated to set l2 and l3 len if any cksum
2760 		 * offload is enabled.
2761 		 */
2762 		if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
2763 		    (m->l2_len == 0 || m->l3_len == 0))) {
2764 			PMD_TX_LOG(DEBUG,
2765 				"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
2766 				i);
2767 			rte_errno = EINVAL;
2768 			return i;
2769 		}
2770 		ret = rte_validate_tx_offload(m);
2771 		if (ret != 0) {
2772 			rte_errno = -ret;
2773 			return i;
2774 		}
2775 #endif
2776 
2777 		/* Verify HW support for requested offloads and determine if
2778 		 * pseudo header checksum is needed.
2779 		 */
2780 		need_pseudo_csum = false;
2781 		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2782 			if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2783 			    !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
2784 				rte_errno = ENOTSUP;
2785 				return i;
2786 			}
2787 
2788 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
2789 			    !(dev_offload_capa & ENA_IPV4_TSO)) {
2790 				rte_errno = ENOTSUP;
2791 				return i;
2792 			}
2793 
2794 			/* Check HW capabilities and if pseudo csum is needed
2795 			 * for L4 offloads.
2796 			 */
2797 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2798 			    !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
2799 				if (dev_offload_capa &
2800 				    ENA_L4_IPV4_CSUM_PARTIAL) {
2801 					need_pseudo_csum = true;
2802 				} else {
2803 					rte_errno = ENOTSUP;
2804 					return i;
2805 				}
2806 			}
2807 
2808 			/* Parse the DF flag */
2809 			ip_hdr = rte_pktmbuf_mtod_offset(m,
2810 				struct rte_ipv4_hdr *, m->l2_len);
2811 			frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2812 			if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
2813 				m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2814 			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2815 				/* In case we are supposed to TSO and have DF
2816 				 * not set (DF=0) hardware must be provided with
2817 				 * partial checksum.
2818 				 */
2819 				need_pseudo_csum = true;
2820 			}
2821 		} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2822 			/* There is no support for IPv6 TSO as for now. */
2823 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2824 				rte_errno = ENOTSUP;
2825 				return i;
2826 			}
2827 
2828 			/* Check HW capabilities and if pseudo csum is needed */
2829 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2830 			    !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
2831 				if (dev_offload_capa &
2832 				    ENA_L4_IPV6_CSUM_PARTIAL) {
2833 					need_pseudo_csum = true;
2834 				} else {
2835 					rte_errno = ENOTSUP;
2836 					return i;
2837 				}
2838 			}
2839 		}
2840 
2841 		if (need_pseudo_csum) {
2842 			ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
2843 			if (ret != 0) {
2844 				rte_errno = -ret;
2845 				return i;
2846 			}
2847 		}
2848 	}
2849 
2850 	return i;
2851 }
2852 
2853 static void ena_update_hints(struct ena_adapter *adapter,
2854 			     struct ena_admin_ena_hw_hints *hints)
2855 {
2856 	if (hints->admin_completion_tx_timeout)
2857 		adapter->ena_dev.admin_queue.completion_timeout =
2858 			hints->admin_completion_tx_timeout * 1000;
2859 
2860 	if (hints->mmio_read_timeout)
2861 		/* convert to usec */
2862 		adapter->ena_dev.mmio_read.reg_read_to =
2863 			hints->mmio_read_timeout * 1000;
2864 
2865 	if (hints->driver_watchdog_timeout) {
2866 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2867 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2868 		else
			/* Convert msecs to ticks */
2870 			adapter->keep_alive_timeout =
2871 				(hints->driver_watchdog_timeout *
2872 				rte_get_timer_hz()) / 1000;
2873 	}
2874 }
2875 
2876 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
2877 	struct ena_tx_buffer *tx_info,
2878 	struct rte_mbuf *mbuf,
2879 	void **push_header,
2880 	uint16_t *header_len)
2881 {
2882 	struct ena_com_buf *ena_buf;
2883 	uint16_t delta, seg_len, push_len;
2884 
2885 	delta = 0;
2886 	seg_len = mbuf->data_len;
2887 
2888 	tx_info->mbuf = mbuf;
2889 	ena_buf = tx_info->bufs;
2890 
2891 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2892 		/*
		 * The Tx header is usually smaller than tx_max_header_size.
		 * It is not a problem to push more data to the device than
		 * strictly needed when the mbuf holds more than
		 * tx_max_header_size bytes.
2897 		 */
2898 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
2899 		*header_len = push_len;
2900 
2901 		if (likely(push_len <= seg_len)) {
2902 			/* If the push header is in the single segment, then
2903 			 * just point it to the 1st mbuf data.
2904 			 */
2905 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
2906 		} else {
			/* If the push header spans several segments, copy it
			 * into the intermediate buffer.
2909 			 */
2910 			rte_pktmbuf_read(mbuf, 0, push_len,
2911 				tx_ring->push_buf_intermediate_buf);
2912 			*push_header = tx_ring->push_buf_intermediate_buf;
2913 			delta = push_len - seg_len;
2914 		}
2915 	} else {
2916 		*push_header = NULL;
2917 		*header_len = 0;
2918 		push_len = 0;
2919 	}
2920 
2921 	/* Process first segment taking into consideration pushed header */
2922 	if (seg_len > push_len) {
2923 		ena_buf->paddr = mbuf->buf_iova +
2924 				mbuf->data_off +
2925 				push_len;
2926 		ena_buf->len = seg_len - push_len;
2927 		ena_buf++;
2928 		tx_info->num_of_bufs++;
2929 	}
2930 
2931 	while ((mbuf = mbuf->next) != NULL) {
2932 		seg_len = mbuf->data_len;
2933 
2934 		/* Skip mbufs if whole data is pushed as a header */
2935 		if (unlikely(delta > seg_len)) {
2936 			delta -= seg_len;
2937 			continue;
2938 		}
2939 
2940 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
2941 		ena_buf->len = seg_len - delta;
2942 		ena_buf++;
2943 		tx_info->num_of_bufs++;
2944 
2945 		delta = 0;
2946 	}
2947 }
2948 
2949 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
2950 {
2951 	struct ena_tx_buffer *tx_info;
2952 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
2953 	uint16_t next_to_use;
2954 	uint16_t header_len;
2955 	uint16_t req_id;
2956 	void *push_header;
2957 	int nb_hw_desc;
2958 	int rc;
2959 
	/* Check for space for 2 additional descriptors on top of the packet's
	 * segments: one for a possible header split and one for the metadata
	 * descriptor
2962 	 */
2963 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2964 					  mbuf->nb_segs + 2)) {
2965 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
2966 		return ENA_COM_NO_MEM;
2967 	}
2968 
2969 	next_to_use = tx_ring->next_to_use;
2970 
2971 	req_id = tx_ring->empty_tx_reqs[next_to_use];
2972 	tx_info = &tx_ring->tx_buffer_info[req_id];
2973 	tx_info->num_of_bufs = 0;
2974 	RTE_ASSERT(tx_info->mbuf == NULL);
2975 
2976 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
2977 
2978 	ena_tx_ctx.ena_bufs = tx_info->bufs;
2979 	ena_tx_ctx.push_header = push_header;
2980 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2981 	ena_tx_ctx.req_id = req_id;
2982 	ena_tx_ctx.header_len = header_len;
2983 
2984 	/* Set Tx offloads flags, if applicable */
2985 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
2986 		tx_ring->disable_meta_caching);
2987 
2988 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
2989 			&ena_tx_ctx))) {
2990 		PMD_TX_LOG(DEBUG,
2991 			"LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
2992 			tx_ring->id);
2993 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2994 		tx_ring->tx_stats.doorbells++;
2995 		tx_ring->pkts_without_db = false;
2996 	}
2997 
2998 	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
3000 		&nb_hw_desc);
3001 	if (unlikely(rc)) {
3002 		PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
3003 		++tx_ring->tx_stats.prepare_ctx_err;
3004 		ena_trigger_reset(tx_ring->adapter,
3005 			ENA_REGS_RESET_DRIVER_INVALID_STATE);
3006 		return rc;
3007 	}
3008 
3009 	tx_info->tx_descs = nb_hw_desc;
3010 	tx_info->timestamp = rte_get_timer_cycles();
3011 
3012 	tx_ring->tx_stats.cnt++;
3013 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
3014 
3015 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
3016 		tx_ring->size_mask);
3017 
3018 	return 0;
3019 }
3020 
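/* Fast-free path: mbufs are returned to their mempool in bulk. This is only
 * correct under the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE contract, i.e. all
 * mbufs come from the same mempool and have a reference count of one.
 */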
3021 static __rte_always_inline size_t
3022 ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean,
3023 			 struct rte_mbuf *mbuf,
3024 			 size_t mbuf_cnt,
3025 			 size_t buf_size)
3026 {
3027 	struct rte_mbuf *m_next;
3028 
3029 	while (mbuf != NULL) {
3030 		m_next = mbuf->next;
3031 		mbufs_to_clean[mbuf_cnt++] = mbuf;
3032 		if (mbuf_cnt == buf_size) {
3033 			rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean,
3034 				(unsigned int)mbuf_cnt);
3035 			mbuf_cnt = 0;
3036 		}
3037 		mbuf = m_next;
3038 	}
3039 
3040 	return mbuf_cnt;
3041 }
3042 
3043 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
3044 {
3045 	struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE];
3046 	struct ena_ring *tx_ring = (struct ena_ring *)txp;
3047 	size_t mbuf_cnt = 0;
3048 	unsigned int total_tx_descs = 0;
3049 	unsigned int total_tx_pkts = 0;
3050 	uint16_t cleanup_budget;
3051 	uint16_t next_to_clean = tx_ring->next_to_clean;
3052 	bool fast_free = tx_ring->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3053 
3054 	/*
3055 	 * If free_pkt_cnt is equal to 0, it means that the user requested
3056 	 * full cleanup, so attempt to release all Tx descriptors
3057 	 * (ring_size - 1 -> size_mask)
3058 	 */
3059 	cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt;
3060 
3061 	while (likely(total_tx_pkts < cleanup_budget)) {
3062 		struct rte_mbuf *mbuf;
3063 		struct ena_tx_buffer *tx_info;
3064 		uint16_t req_id;
3065 
3066 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
3067 			break;
3068 
3069 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
3070 			break;
3071 
3072 		/* Get Tx info & store how many descs were processed  */
3073 		tx_info = &tx_ring->tx_buffer_info[req_id];
3074 		tx_info->timestamp = 0;
3075 
3076 		mbuf = tx_info->mbuf;
3077 		if (fast_free) {
3078 			mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt,
3079 				ENA_CLEANUP_BUF_SIZE);
3080 		} else {
3081 			rte_pktmbuf_free(mbuf);
3082 		}
3083 
3084 		tx_info->mbuf = NULL;
3085 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
3086 
3087 		total_tx_descs += tx_info->tx_descs;
3088 		total_tx_pkts++;
3089 
3090 		/* Put back descriptor to the ring for reuse */
3091 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
3092 			tx_ring->size_mask);
3093 	}
3094 
3095 	if (likely(total_tx_descs > 0)) {
3096 		/* acknowledge completion of sent packets */
3097 		tx_ring->next_to_clean = next_to_clean;
3098 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
3099 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
3100 	}
3101 
3102 	if (mbuf_cnt != 0)
3103 		rte_mempool_put_bulk(mbufs_to_clean[0]->pool,
3104 			(void **)mbufs_to_clean, mbuf_cnt);
3105 
	/* Record the last full cleanup time for the Tx completion watchdog */
3107 	if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget)
3108 		tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
3109 
3110 	return total_tx_pkts;
3111 }
3112 
3113 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
3114 				  uint16_t nb_pkts)
3115 {
3116 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
3117 	int available_desc;
3118 	uint16_t sent_idx = 0;
3119 
3120 #ifdef RTE_ETHDEV_DEBUG_TX
3121 	/* Check adapter state */
3122 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
3123 		PMD_TX_LOG(ALERT,
3124 			"Trying to xmit pkts while device is NOT running\n");
3125 		return 0;
3126 	}
3127 #endif
3128 
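	/* If the number of free Tx descriptors dropped below the threshold,
	 * reclaim completed descriptors before admitting new packets.
	 */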
3129 	available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
3130 	if (available_desc < tx_ring->tx_free_thresh)
3131 		ena_tx_cleanup((void *)tx_ring, 0);
3132 
3133 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
3134 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
3135 			break;
3136 		tx_ring->pkts_without_db = true;
3137 		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
3138 			tx_ring->size_mask)]);
3139 	}
3140 
3141 	/* If there are ready packets to be xmitted... */
3142 	if (likely(tx_ring->pkts_without_db)) {
3143 		/* ...let HW do its best :-) */
3144 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
3145 		tx_ring->tx_stats.doorbells++;
3146 		tx_ring->pkts_without_db = false;
3147 	}
3148 
3149 	tx_ring->tx_stats.available_desc =
3150 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
3151 	tx_ring->tx_stats.tx_poll++;
3152 
3153 	return sent_idx;
3154 }
3155 
3156 int ena_copy_eni_stats(struct ena_adapter *adapter, struct ena_stats_eni *stats)
3157 {
3158 	int rc;
3159 
3160 	rte_spinlock_lock(&adapter->admin_lock);
	/* Retrieve the latest statistics through the admin queue and store
	 * them in the caller's buffer; on a communication error the buffer
	 * keeps its previous values.
3163 	 */
3164 	rc = ENA_PROXY(adapter, ena_com_get_eni_stats, &adapter->ena_dev,
3165 		(struct ena_admin_eni_stats *)stats);
3166 	rte_spinlock_unlock(&adapter->admin_lock);
3167 	if (rc != 0) {
3168 		if (rc == ENA_COM_UNSUPPORTED) {
3169 			PMD_DRV_LOG(DEBUG,
3170 				"Retrieving ENI metrics is not supported\n");
3171 		} else {
3172 			PMD_DRV_LOG(WARNING,
3173 				"Failed to get ENI metrics, rc: %d\n", rc);
3174 		}
3175 		return rc;
3176 	}
3177 
3178 	return 0;
3179 }
3180 
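/* The xstats array is laid out as: global stats, then ENI stats, then
 * per-Rx-queue stats (grouped by stat type) and finally per-Tx-queue stats in
 * the same order. All xstats callbacks below rely on this layout.
 */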
3181 /**
3182  * DPDK callback to retrieve names of extended device statistics
3183  *
3184  * @param dev
3185  *   Pointer to Ethernet device structure.
3186  * @param[out] xstats_names
3187  *   Buffer to insert names into.
3188  * @param n
3189  *   Number of names.
3190  *
3191  * @return
3192  *   Number of xstats names.
3193  */
3194 static int ena_xstats_get_names(struct rte_eth_dev *dev,
3195 				struct rte_eth_xstat_name *xstats_names,
3196 				unsigned int n)
3197 {
3198 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
3199 	unsigned int stat, i, count = 0;
3200 
3201 	if (n < xstats_count || !xstats_names)
3202 		return xstats_count;
3203 
3204 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
3205 		strcpy(xstats_names[count].name,
3206 			ena_stats_global_strings[stat].name);
3207 
3208 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++)
3209 		strcpy(xstats_names[count].name,
3210 			ena_stats_eni_strings[stat].name);
3211 
3212 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
3213 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
3214 			snprintf(xstats_names[count].name,
3215 				sizeof(xstats_names[count].name),
3216 				"rx_q%d_%s", i,
3217 				ena_stats_rx_strings[stat].name);
3218 
3219 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
3220 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
3221 			snprintf(xstats_names[count].name,
3222 				sizeof(xstats_names[count].name),
3223 				"tx_q%d_%s", i,
3224 				ena_stats_tx_strings[stat].name);
3225 
3226 	return xstats_count;
3227 }
3228 
3229 /**
3230  * DPDK callback to retrieve names of extended device statistics for the given
3231  * ids.
3232  *
3233  * @param dev
3234  *   Pointer to Ethernet device structure.
3235  * @param[out] xstats_names
3236  *   Buffer to insert names into.
3237  * @param ids
3238  *   IDs array for which the names should be retrieved.
3239  * @param size
3240  *   Number of ids.
3241  *
3242  * @return
3243  *   Positive value: number of xstats names. Negative value: error code.
3244  */
3245 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
3246 				      const uint64_t *ids,
3247 				      struct rte_eth_xstat_name *xstats_names,
3248 				      unsigned int size)
3249 {
3250 	uint64_t xstats_count = ena_xstats_calc_num(dev->data);
3251 	uint64_t id, qid;
3252 	unsigned int i;
3253 
3254 	if (xstats_names == NULL)
3255 		return xstats_count;
3256 
3257 	for (i = 0; i < size; ++i) {
3258 		id = ids[i];
		if (id >= xstats_count) {
3260 			PMD_DRV_LOG(ERR,
3261 				"ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
3262 				 id, xstats_count);
3263 			return -EINVAL;
3264 		}
3265 
3266 		if (id < ENA_STATS_ARRAY_GLOBAL) {
3267 			strcpy(xstats_names[i].name,
3268 			       ena_stats_global_strings[id].name);
3269 			continue;
3270 		}
3271 
3272 		id -= ENA_STATS_ARRAY_GLOBAL;
3273 		if (id < ENA_STATS_ARRAY_ENI) {
3274 			strcpy(xstats_names[i].name,
3275 			       ena_stats_eni_strings[id].name);
3276 			continue;
3277 		}
3278 
3279 		id -= ENA_STATS_ARRAY_ENI;
		if (id < ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues) {
			qid = id % dev->data->nb_rx_queues;
			id /= dev->data->nb_rx_queues;
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "rx_q%" PRIu64 "_%s",
				 qid, ena_stats_rx_strings[id].name);
			continue;
		}

		id -= ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
		/* Although this condition is not strictly needed, it is kept
		 * for consistency in case a new xstats group is ever added.
		 */
		if (id < ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues) {
			qid = id % dev->data->nb_tx_queues;
			id /= dev->data->nb_tx_queues;
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "tx_q%" PRIu64 "_%s",
				 qid, ena_stats_tx_strings[id].name);
			continue;
		}
3303 	}
3304 
3305 	return i;
3306 }
3307 
3308 /**
3309  * DPDK callback to get extended device statistics.
3310  *
3311  * @param dev
3312  *   Pointer to Ethernet device structure.
3313  * @param[out] stats
3314  *   Stats table output buffer.
3315  * @param n
3316  *   The size of the stats table.
3317  *
3318  * @return
3319  *   Number of xstats on success, negative on failure.
3320  */
3321 static int ena_xstats_get(struct rte_eth_dev *dev,
3322 			  struct rte_eth_xstat *xstats,
3323 			  unsigned int n)
3324 {
3325 	struct ena_adapter *adapter = dev->data->dev_private;
3326 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
3327 	struct ena_stats_eni eni_stats;
3328 	unsigned int stat, i, count = 0;
3329 	int stat_offset;
3330 	void *stats_begin;
3331 
3332 	if (n < xstats_count)
3333 		return xstats_count;
3334 
3335 	if (!xstats)
3336 		return 0;
3337 
3338 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
3339 		stat_offset = ena_stats_global_strings[stat].stat_offset;
3340 		stats_begin = &adapter->dev_stats;
3341 
3342 		xstats[count].id = count;
3343 		xstats[count].value = *((uint64_t *)
3344 			((char *)stats_begin + stat_offset));
3345 	}
3346 
	/* Even if the function below fails, copy the previous (or initial)
	 * values to keep the rte_eth_xstat array consistent.
3349 	 */
3350 	ena_copy_eni_stats(adapter, &eni_stats);
3351 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
3352 		stat_offset = ena_stats_eni_strings[stat].stat_offset;
3353 		stats_begin = &eni_stats;
3354 
3355 		xstats[count].id = count;
3356 		xstats[count].value = *((uint64_t *)
3357 		    ((char *)stats_begin + stat_offset));
3358 	}
3359 
3360 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
3361 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
3362 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
3363 			stats_begin = &adapter->rx_ring[i].rx_stats;
3364 
3365 			xstats[count].id = count;
3366 			xstats[count].value = *((uint64_t *)
3367 				((char *)stats_begin + stat_offset));
3368 		}
3369 	}
3370 
3371 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
3372 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
3373 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
			stats_begin = &adapter->tx_ring[i].tx_stats;
3375 
3376 			xstats[count].id = count;
3377 			xstats[count].value = *((uint64_t *)
3378 				((char *)stats_begin + stat_offset));
3379 		}
3380 	}
3381 
3382 	return count;
3383 }
3384 
3385 static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
3386 				const uint64_t *ids,
3387 				uint64_t *values,
3388 				unsigned int n)
3389 {
3390 	struct ena_adapter *adapter = dev->data->dev_private;
3391 	struct ena_stats_eni eni_stats;
3392 	uint64_t id;
3393 	uint64_t rx_entries, tx_entries;
3394 	unsigned int i;
3395 	int qid;
3396 	int valid = 0;
3397 	bool was_eni_copied = false;
3398 
3399 	for (i = 0; i < n; ++i) {
3400 		id = ids[i];
3401 		/* Check if id belongs to global statistics */
3402 		if (id < ENA_STATS_ARRAY_GLOBAL) {
3403 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
3404 			++valid;
3405 			continue;
3406 		}
3407 
3408 		/* Check if id belongs to ENI statistics */
3409 		id -= ENA_STATS_ARRAY_GLOBAL;
3410 		if (id < ENA_STATS_ARRAY_ENI) {
3411 			/* Avoid reading ENI stats multiple times in a single
3412 			 * function call, as it requires communication with the
3413 			 * admin queue.
3414 			 */
3415 			if (!was_eni_copied) {
3416 				was_eni_copied = true;
3417 				ena_copy_eni_stats(adapter, &eni_stats);
3418 			}
3419 			values[i] = *((uint64_t *)&eni_stats + id);
3420 			++valid;
3421 			continue;
3422 		}
3423 
3424 		/* Check if id belongs to rx queue statistics */
3425 		id -= ENA_STATS_ARRAY_ENI;
3426 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
3427 		if (id < rx_entries) {
3428 			qid = id % dev->data->nb_rx_queues;
3429 			id /= dev->data->nb_rx_queues;
3430 			values[i] = *((uint64_t *)
3431 				&adapter->rx_ring[qid].rx_stats + id);
3432 			++valid;
3433 			continue;
3434 		}
3435 		/* Check if id belongs to tx queue statistics */
3436 		id -= rx_entries;
3437 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
3438 		if (id < tx_entries) {
3439 			qid = id % dev->data->nb_tx_queues;
3440 			id /= dev->data->nb_tx_queues;
3441 			values[i] = *((uint64_t *)
3442 				&adapter->tx_ring[qid].tx_stats + id);
3443 			++valid;
3444 			continue;
3445 		}
3446 	}
3447 
3448 	return valid;
3449 }
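/*
 * Illustrative only: inside this callback the driver-local IDs are laid out
 * as [global][eni][rx per-queue][tx per-queue], with each per-queue block
 * ordered stat-major (the queue index varies fastest). Applications do not
 * need to know that layout; they can resolve IDs by name, e.g. (port_id and
 * the queue number are hypothetical):
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_q0_cnt", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_q0_cnt = %" PRIu64 "\n", value);
 */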
3450 
3451 static int ena_process_uint_devarg(const char *key,
3452 				  const char *value,
3453 				  void *opaque)
3454 {
3455 	struct ena_adapter *adapter = opaque;
3456 	char *str_end;
3457 	uint64_t uint_value;
3458 
3459 	uint_value = strtoull(value, &str_end, 10);
3460 	if (value == str_end) {
3461 		PMD_INIT_LOG(ERR,
3462 			"Invalid value for key '%s'. Only uint values are accepted.\n",
3463 			key);
3464 		return -EINVAL;
3465 	}
3466 
3467 	if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
3468 		if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
3469 			PMD_INIT_LOG(ERR,
3470 				"Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n",
3471 				uint_value, ENA_MAX_TX_TIMEOUT_SECONDS);
3472 			return -EINVAL;
3473 		} else if (uint_value == 0) {
3474 			PMD_INIT_LOG(INFO,
3475 				"Check for missing Tx completions has been disabled.\n");
3476 			adapter->missing_tx_completion_to =
3477 				ENA_HW_HINTS_NO_TIMEOUT;
3478 		} else {
3479 			PMD_INIT_LOG(INFO,
3480 				"Tx packet completion timeout set to %" PRIu64 " seconds.\n",
3481 				uint_value);
3482 			adapter->missing_tx_completion_to =
3483 				uint_value * rte_get_timer_hz();
3484 		}
3485 	}
3486 
3487 	return 0;
3488 }
3489 
3490 static int ena_process_bool_devarg(const char *key,
3491 				   const char *value,
3492 				   void *opaque)
3493 {
3494 	struct ena_adapter *adapter = opaque;
3495 	bool bool_value;
3496 
3497 	/* Parse the value. */
3498 	if (strcmp(value, "1") == 0) {
3499 		bool_value = true;
3500 	} else if (strcmp(value, "0") == 0) {
3501 		bool_value = false;
3502 	} else {
3503 		PMD_INIT_LOG(ERR,
3504 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
3505 			value, key);
3506 		return -EINVAL;
3507 	}
3508 
3509 	/* Now, assign it to the proper adapter field. */
3510 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
3511 		adapter->use_large_llq_hdr = bool_value;
3512 	else if (strcmp(key, ENA_DEVARG_ENABLE_LLQ) == 0)
3513 		adapter->enable_llq = bool_value;
3514 
3515 	return 0;
3516 }
3517 
3518 static int ena_parse_devargs(struct ena_adapter *adapter,
3519 			     struct rte_devargs *devargs)
3520 {
3521 	static const char * const allowed_args[] = {
3522 		ENA_DEVARG_LARGE_LLQ_HDR,
3523 		ENA_DEVARG_MISS_TXC_TO,
3524 		ENA_DEVARG_ENABLE_LLQ,
3525 		NULL,
3526 	};
3527 	struct rte_kvargs *kvlist;
3528 	int rc;
3529 
3530 	if (devargs == NULL)
3531 		return 0;
3532 
3533 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
3534 	if (kvlist == NULL) {
3535 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
3536 			devargs->args);
3537 		return -EINVAL;
3538 	}
3539 
3540 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
3541 		ena_process_bool_devarg, adapter);
3542 	if (rc != 0)
3543 		goto exit;
3544 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO,
3545 		ena_process_uint_devarg, adapter);
3546 	if (rc != 0)
3547 		goto exit;
3548 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_ENABLE_LLQ,
3549 		ena_process_bool_devarg, adapter);
3550 
3551 exit:
3552 	rte_kvargs_free(kvlist);
3553 
3554 	return rc;
3555 }
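/*
 * Illustrative only: the device arguments parsed above are appended to the
 * PCI device on the EAL command line as a comma-separated key=value list,
 * e.g. (the PCI address is hypothetical):
 *
 *	dpdk-testpmd -a 0000:00:06.0,large_llq_hdr=1,miss_txc_to=5,enable_llq=1 -- -i
 */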
3556 
3557 static int ena_setup_rx_intr(struct rte_eth_dev *dev)
3558 {
3559 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3560 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3561 	int rc;
3562 	uint16_t vectors_nb, i;
3563 	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
3564 
3565 	if (!rx_intr_requested)
3566 		return 0;
3567 
3568 	if (!rte_intr_cap_multiple(intr_handle)) {
3569 		PMD_DRV_LOG(ERR,
3570 			"Rx interrupt requested, but it isn't supported by the PCI driver\n");
3571 		return -ENOTSUP;
3572 	}
3573 
3574 	/* Disable interrupt mapping before the configuration starts. */
3575 	rte_intr_disable(intr_handle);
3576 
3577 	/* Verify if there are enough vectors available. */
3578 	vectors_nb = dev->data->nb_rx_queues;
3579 	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
3580 		PMD_DRV_LOG(ERR,
3581 			"Too many Rx interrupts requested, maximum number: %d\n",
3582 			RTE_MAX_RXTX_INTR_VEC_ID);
3583 		rc = -ENOTSUP;
3584 		goto enable_intr;
3585 	}
3586 
3587 	/* Allocate the vector list */
3588 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
3589 					   dev->data->nb_rx_queues)) {
3590 		PMD_DRV_LOG(ERR,
3591 			"Failed to allocate interrupt vector for %d queues\n",
3592 			dev->data->nb_rx_queues);
3593 		rc = -ENOMEM;
3594 		goto enable_intr;
3595 	}
3596 
3597 	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
3598 	if (rc != 0)
3599 		goto free_intr_vec;
3600 
3601 	if (!rte_intr_allow_others(intr_handle)) {
3602 		PMD_DRV_LOG(ERR,
3603 			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
3604 		goto disable_intr_efd;
3605 	}
3606 
3607 	for (i = 0; i < vectors_nb; ++i)
3608 		if (rte_intr_vec_list_index_set(intr_handle, i,
3609 					   RTE_INTR_VEC_RXTX_OFFSET + i))
3610 			goto disable_intr_efd;
3611 
3612 	rte_intr_enable(intr_handle);
3613 	return 0;
3614 
3615 disable_intr_efd:
3616 	rte_intr_efd_disable(intr_handle);
3617 free_intr_vec:
3618 	rte_intr_vec_list_free(intr_handle);
3619 enable_intr:
3620 	rte_intr_enable(intr_handle);
3621 	return rc;
3622 }
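/*
 * Illustrative only: a minimal sketch of the application side of the Rx
 * interrupt path set up above; port_id, queue_id, the queue counts and
 * timeout_ms are hypothetical.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	struct rte_epoll_event event;
 *
 *	conf.intr_conf.rxq = 1;            // request per-queue Rx interrupts
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	...
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);   // unmask the IRQ
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);  // mask it again
 *	// ...then poll the queue with rte_eth_rx_burst() as usual
 */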
3623 
3624 static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
3625 				 uint16_t queue_id,
3626 				 bool unmask)
3627 {
3628 	struct ena_adapter *adapter = dev->data->dev_private;
3629 	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
3630 	struct ena_eth_io_intr_reg intr_reg;
3631 
3632 	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask);
3633 	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
3634 }
3635 
3636 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
3637 				    uint16_t queue_id)
3638 {
3639 	ena_rx_queue_intr_set(dev, queue_id, true);
3640 
3641 	return 0;
3642 }
3643 
3644 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
3645 				     uint16_t queue_id)
3646 {
3647 	ena_rx_queue_intr_set(dev, queue_id, false);
3648 
3649 	return 0;
3650 }
3651 
3652 static int ena_configure_aenq(struct ena_adapter *adapter)
3653 {
3654 	uint32_t aenq_groups = adapter->all_aenq_groups;
3655 	int rc;
3656 
3657 	/* all_aenq_groups holds all AENQ functions supported by the device and
3658 	 * the HW, so first make sure the LSC request is valid.
3659 	 */
3660 	if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
3661 		if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
3662 			PMD_DRV_LOG(ERR,
3663 				"LSC requested, but it's not supported by the AENQ\n");
3664 			return -EINVAL;
3665 		}
3666 	} else {
3667 		/* If LSC wasn't enabled by the app, let's enable all supported
3668 		 * AENQ procedures except the LSC.
3669 		 */
3670 		aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
3671 	}
3672 
3673 	rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
3674 	if (rc != 0) {
3675 		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
3676 		return rc;
3677 	}
3678 
3679 	adapter->active_aenq_groups = aenq_groups;
3680 
3681 	return 0;
3682 }
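/*
 * Illustrative only: the LSC path negotiated above is exercised by an
 * application that requests link-state interrupts and registers a callback;
 * port_id, the queue counts and link_change_cb are hypothetical.
 *
 *	conf.intr_conf.lsc = 1;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      link_change_cb, NULL);
 *
 * When the device reports ENA_ADMIN_LINK_CHANGE over the AENQ,
 * ena_update_on_link_change() below invokes such callbacks via
 * rte_eth_dev_callback_process().
 */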
3683 
3684 int ena_mp_indirect_table_set(struct ena_adapter *adapter)
3685 {
3686 	return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev);
3687 }
3688 
3689 int ena_mp_indirect_table_get(struct ena_adapter *adapter,
3690 			      uint32_t *indirect_table)
3691 {
3692 	return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev,
3693 		indirect_table);
3694 }
3695 
3696 /*********************************************************************
3697  *  ena_plat_dpdk.h function implementations
3698  *********************************************************************/
3699 
3700 const struct rte_memzone *
3701 ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
3702 		       int socket_id, unsigned int alignment, void **virt_addr,
3703 		       dma_addr_t *phys_addr)
3704 {
3705 	char z_name[RTE_MEMZONE_NAMESIZE];
3706 	struct ena_adapter *adapter = data->dev_private;
3707 	const struct rte_memzone *memzone;
3708 	int rc;
3709 
3710 	rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "",
3711 		data->port_id, adapter->memzone_cnt);
3712 	if (rc >= RTE_MEMZONE_NAMESIZE) {
3713 		PMD_DRV_LOG(ERR,
3714 			"Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n",
3715 			data->port_id, adapter->memzone_cnt);
3716 		goto error;
3717 	}
3718 	adapter->memzone_cnt++;
3719 
3720 	memzone = rte_memzone_reserve_aligned(z_name, size, socket_id,
3721 		RTE_MEMZONE_IOVA_CONTIG, alignment);
3722 	if (memzone == NULL) {
3723 		PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n",
3724 			z_name);
3725 		goto error;
3726 	}
3727 
3728 	memset(memzone->addr, 0, size);
3729 	*virt_addr = memzone->addr;
3730 	*phys_addr = memzone->iova;
3731 
3732 	return memzone;
3733 
3734 error:
3735 	*virt_addr = NULL;
3736 	*phys_addr = 0;
3737 
3738 	return NULL;
3739 }
3740 
3741 
3742 /*********************************************************************
3743  *  PMD configuration
3744  *********************************************************************/
3745 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3746 	struct rte_pci_device *pci_dev)
3747 {
3748 	return rte_eth_dev_pci_generic_probe(pci_dev,
3749 		sizeof(struct ena_adapter), eth_ena_dev_init);
3750 }
3751 
3752 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
3753 {
3754 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
3755 }
3756 
3757 static struct rte_pci_driver rte_ena_pmd = {
3758 	.id_table = pci_id_ena_map,
3759 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3760 		     RTE_PCI_DRV_WC_ACTIVATE,
3761 	.probe = eth_ena_pci_probe,
3762 	.remove = eth_ena_pci_remove,
3763 };
3764 
3765 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
3766 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
3767 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
3768 RTE_PMD_REGISTER_PARAM_STRING(net_ena,
3769 	ENA_DEVARG_LARGE_LLQ_HDR "=<0|1> "
3770 	ENA_DEVARG_ENABLE_LLQ "=<0|1> "
3771 	ENA_DEVARG_MISS_TXC_TO "=<uint>");
3772 RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
3773 RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
3774 #ifdef RTE_ETHDEV_DEBUG_RX
3775 RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG);
3776 #endif
3777 #ifdef RTE_ETHDEV_DEBUG_TX
3778 RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG);
3779 #endif
3780 RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING);
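/*
 * Illustrative only: the log types registered above can be tuned at runtime
 * via the EAL command line; assuming the default "pmd.net.ena" prefix, e.g.:
 *
 *	dpdk-testpmd --log-level=pmd.net.ena.driver:debug -a 0000:00:06.0 -- -i
 */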
3781 
3782 /******************************************************************************
3783  ******************************** AENQ Handlers *******************************
3784  *****************************************************************************/
3785 static void ena_update_on_link_change(void *adapter_data,
3786 				      struct ena_admin_aenq_entry *aenq_e)
3787 {
3788 	struct rte_eth_dev *eth_dev = adapter_data;
3789 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3790 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
3791 	uint32_t status;
3792 
3793 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3794 
3795 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
3796 	adapter->link_status = status;
3797 
3798 	ena_link_update(eth_dev, 0);
3799 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
3800 }
3801 
3802 static void ena_notification(void *adapter_data,
3803 			     struct ena_admin_aenq_entry *aenq_e)
3804 {
3805 	struct rte_eth_dev *eth_dev = adapter_data;
3806 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3807 	struct ena_admin_ena_hw_hints *hints;
3808 
3809 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
3810 		PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
3811 			aenq_e->aenq_common_desc.group,
3812 			ENA_ADMIN_NOTIFICATION);
3813 
3814 	switch (aenq_e->aenq_common_desc.syndrome) {
3815 	case ENA_ADMIN_UPDATE_HINTS:
3816 		hints = (struct ena_admin_ena_hw_hints *)
3817 			(&aenq_e->inline_data_w4);
3818 		ena_update_hints(adapter, hints);
3819 		break;
3820 	default:
3821 		PMD_DRV_LOG(ERR, "Invalid AENQ notification link state: %d\n",
3822 			aenq_e->aenq_common_desc.syndrome);
3823 	}
3824 }
3825 
3826 static void ena_keep_alive(void *adapter_data,
3827 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
3828 {
3829 	struct rte_eth_dev *eth_dev = adapter_data;
3830 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3831 	struct ena_admin_aenq_keep_alive_desc *desc;
3832 	uint64_t rx_drops;
3833 	uint64_t tx_drops;
3834 
3835 	adapter->timestamp_wd = rte_get_timer_cycles();
3836 
3837 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3838 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3839 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3840 
3841 	adapter->drv_stats->rx_drops = rx_drops;
3842 	adapter->dev_stats.tx_drops = tx_drops;
3843 }
3844 
3845 /**
3846  * This handler will be called for an unknown event group or an unimplemented handler.
3847  **/
3848 static void unimplemented_aenq_handler(__rte_unused void *data,
3849 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
3850 {
3851 	PMD_DRV_LOG(ERR,
3852 		"Unknown event was received or event with unimplemented handler\n");
3853 }
3854 
3855 static struct ena_aenq_handlers aenq_handlers = {
3856 	.handlers = {
3857 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3858 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3859 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
3860 	},
3861 	.unimplemented_handler = unimplemented_aenq_handler
3862 };
3863 
3864 /*********************************************************************
3865  *  Multi-Process communication request handling (in primary)
3866  *********************************************************************/
3867 static int
3868 ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
3869 {
3870 	const struct ena_mp_body *req =
3871 		(const struct ena_mp_body *)mp_msg->param;
3872 	struct ena_adapter *adapter;
3873 	struct ena_com_dev *ena_dev;
3874 	struct ena_mp_body *rsp;
3875 	struct rte_mp_msg mp_rsp;
3876 	struct rte_eth_dev *dev;
3877 	int res = 0;
3878 
3879 	rsp = (struct ena_mp_body *)&mp_rsp.param;
3880 	mp_msg_init(&mp_rsp, req->type, req->port_id);
3881 
3882 	if (!rte_eth_dev_is_valid_port(req->port_id)) {
3883 		rte_errno = ENODEV;
3884 		res = -rte_errno;
3885 		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
3886 			    req->port_id, req->type);
3887 		goto end;
3888 	}
3889 	dev = &rte_eth_devices[req->port_id];
3890 	adapter = dev->data->dev_private;
3891 	ena_dev = &adapter->ena_dev;
3892 
3893 	switch (req->type) {
3894 	case ENA_MP_DEV_STATS_GET:
3895 		res = ena_com_get_dev_basic_stats(ena_dev,
3896 						  &adapter->basic_stats);
3897 		break;
3898 	case ENA_MP_ENI_STATS_GET:
3899 		res = ena_com_get_eni_stats(ena_dev,
3900 			(struct ena_admin_eni_stats *)&adapter->eni_stats);
3901 		break;
3902 	case ENA_MP_MTU_SET:
3903 		res = ena_com_set_dev_mtu(ena_dev, req->args.mtu);
3904 		break;
3905 	case ENA_MP_IND_TBL_GET:
3906 		res = ena_com_indirect_table_get(ena_dev,
3907 						 adapter->indirect_table);
3908 		break;
3909 	case ENA_MP_IND_TBL_SET:
3910 		res = ena_com_indirect_table_set(ena_dev);
3911 		break;
3912 	default:
3913 		PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
3914 		res = -EINVAL;
3915 		break;
3916 	}
3917 
3918 end:
3919 	/* Save processing result in the reply */
3920 	rsp->result = res;
3921 	/* Return just IPC processing status */
3922 	return rte_mp_reply(&mp_rsp, peer);
3923 }
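/*
 * Illustrative only: ena_mp_primary_handle() has the rte_mp_t callback
 * signature, so (presumably during device initialization, not shown here) it
 * is hooked up under the driver's IPC name roughly as:
 *
 *	rte_mp_action_register(<driver IPC name>, ena_mp_primary_handle);
 *
 * Secondary processes then submit the admin-queue requests handled above
 * with rte_mp_request_sync() and read back rsp->result from the reply.
 */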
3924