1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
3  * All rights reserved.
4  */
5 
6 #include <rte_string_fns.h>
7 #include <rte_errno.h>
8 #include <rte_version.h>
9 #include <rte_net.h>
10 #include <rte_kvargs.h>
11 
12 #include "ena_ethdev.h"
13 #include "ena_logs.h"
14 #include "ena_platform.h"
15 #include "ena_com.h"
16 #include "ena_eth_com.h"
17 
18 #include <ena_common_defs.h>
19 #include <ena_regs_defs.h>
20 #include <ena_admin_defs.h>
21 #include <ena_eth_io_defs.h>
22 
23 #define DRV_MODULE_VER_MAJOR	2
24 #define DRV_MODULE_VER_MINOR	5
25 #define DRV_MODULE_VER_SUBMINOR	0
26 
27 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
28 
29 #define GET_L4_HDR_LEN(mbuf)					\
30 	((rte_pktmbuf_mtod_offset(mbuf,	struct rte_tcp_hdr *,	\
31 		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
32 
33 #define ETH_GSTRING_LEN	32
34 
35 #define ARRAY_SIZE(x) RTE_DIM(x)
36 
37 #define ENA_MIN_RING_DESC	128
38 
39 #define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)
40 
41 struct ena_stats {
42 	char name[ETH_GSTRING_LEN];
43 	int stat_offset;
44 };
45 
46 #define ENA_STAT_ENTRY(stat, stat_type) { \
47 	.name = #stat, \
48 	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
49 }
50 
51 #define ENA_STAT_RX_ENTRY(stat) \
52 	ENA_STAT_ENTRY(stat, rx)
53 
54 #define ENA_STAT_TX_ENTRY(stat) \
55 	ENA_STAT_ENTRY(stat, tx)
56 
57 #define ENA_STAT_ENI_ENTRY(stat) \
58 	ENA_STAT_ENTRY(stat, eni)
59 
60 #define ENA_STAT_GLOBAL_ENTRY(stat) \
61 	ENA_STAT_ENTRY(stat, dev)
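
/*
 * For illustration, ENA_STAT_RX_ENTRY(cnt) expands to
 *	{ .name = "cnt", .stat_offset = offsetof(struct ena_stats_rx, cnt) },
 * i.e. each entry pairs a printable statistic name with the offset of the
 * matching field inside the corresponding stats structure.
 */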
62 
63 /* Device arguments */
64 #define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
65 
66 /*
67  * Each rte_memzone should have a unique name.
68  * To satisfy this, count the number of allocations and append it to the name.
69  */
70 rte_atomic64_t ena_alloc_cnt;
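
/*
 * Illustrative sketch only (the allocation helpers that consume this counter
 * live in the driver headers, not here; variable names below are placeholders):
 * a unique memzone name can be built by atomically bumping the counter and
 * embedding it in the name, e.g.:
 *
 *	char z_name[RTE_MEMZONE_NAMESIZE];
 *
 *	snprintf(z_name, sizeof(z_name), "ena_alloc_%" PRIi64,
 *		 rte_atomic64_add_return(&ena_alloc_cnt, 1));
 *	mz = rte_memzone_reserve(z_name, size, socket_id, 0);
 */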
71 
72 static const struct ena_stats ena_stats_global_strings[] = {
73 	ENA_STAT_GLOBAL_ENTRY(wd_expired),
74 	ENA_STAT_GLOBAL_ENTRY(dev_start),
75 	ENA_STAT_GLOBAL_ENTRY(dev_stop),
76 	ENA_STAT_GLOBAL_ENTRY(tx_drops),
77 };
78 
79 static const struct ena_stats ena_stats_eni_strings[] = {
80 	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
81 	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
82 	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
83 	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
84 	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
85 };
86 
87 static const struct ena_stats ena_stats_tx_strings[] = {
88 	ENA_STAT_TX_ENTRY(cnt),
89 	ENA_STAT_TX_ENTRY(bytes),
90 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
91 	ENA_STAT_TX_ENTRY(tx_poll),
92 	ENA_STAT_TX_ENTRY(doorbells),
93 	ENA_STAT_TX_ENTRY(bad_req_id),
94 	ENA_STAT_TX_ENTRY(available_desc),
95 	ENA_STAT_TX_ENTRY(missed_tx),
96 };
97 
98 static const struct ena_stats ena_stats_rx_strings[] = {
99 	ENA_STAT_RX_ENTRY(cnt),
100 	ENA_STAT_RX_ENTRY(bytes),
101 	ENA_STAT_RX_ENTRY(refill_partial),
102 	ENA_STAT_RX_ENTRY(l3_csum_bad),
103 	ENA_STAT_RX_ENTRY(l4_csum_bad),
104 	ENA_STAT_RX_ENTRY(l4_csum_good),
105 	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
106 	ENA_STAT_RX_ENTRY(bad_desc_num),
107 	ENA_STAT_RX_ENTRY(bad_req_id),
108 };
109 
110 #define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
111 #define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
112 #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
113 #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
114 
115 #define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
116 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
117 			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
118 			RTE_ETH_TX_OFFLOAD_TCP_TSO)
119 #define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
120 		       RTE_MBUF_F_TX_IP_CKSUM |\
121 		       RTE_MBUF_F_TX_TCP_SEG)
122 
123 /** Vendor ID used by Amazon devices */
124 #define PCI_VENDOR_ID_AMAZON 0x1D0F
125 /** Amazon devices */
126 #define PCI_DEVICE_ID_ENA_VF		0xEC20
127 #define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21
128 
129 #define	ENA_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_L4_MASK |         \
130 	RTE_MBUF_F_TX_IPV6 |            \
131 	RTE_MBUF_F_TX_IPV4 |            \
132 	RTE_MBUF_F_TX_IP_CKSUM |        \
133 	RTE_MBUF_F_TX_TCP_SEG)
134 
135 #define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
136 	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
137 
138 /** HW-specific offload capabilities. */
139 /* IPv4 checksum offload. */
140 #define ENA_L3_IPV4_CSUM		0x0001
141 /* TCP/UDP checksum offload for IPv4 packets. */
142 #define ENA_L4_IPV4_CSUM		0x0002
143 /* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
144 #define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
145 /* TCP/UDP checksum offload for IPv6 packets. */
146 #define ENA_L4_IPV6_CSUM		0x0008
147 /* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
148 #define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
149 /* TSO support for IPv4 packets. */
150 #define ENA_IPV4_TSO			0x0020
151 
152 /* Device supports setting RSS hash. */
153 #define ENA_RX_RSS_HASH			0x0040
154 
155 static const struct rte_pci_id pci_id_ena_map[] = {
156 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
157 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
158 	{ .device_id = 0 },
159 };
160 
161 static struct ena_aenq_handlers aenq_handlers;
162 
163 static int ena_device_init(struct ena_adapter *adapter,
164 			   struct rte_pci_device *pdev,
165 			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
166 static int ena_dev_configure(struct rte_eth_dev *dev);
167 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
168 	struct ena_tx_buffer *tx_info,
169 	struct rte_mbuf *mbuf,
170 	void **push_header,
171 	uint16_t *header_len);
172 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
173 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt);
174 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
175 				  uint16_t nb_pkts);
176 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
177 		uint16_t nb_pkts);
178 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
179 			      uint16_t nb_desc, unsigned int socket_id,
180 			      const struct rte_eth_txconf *tx_conf);
181 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
182 			      uint16_t nb_desc, unsigned int socket_id,
183 			      const struct rte_eth_rxconf *rx_conf,
184 			      struct rte_mempool *mp);
185 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
186 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
187 				    struct ena_com_rx_buf_info *ena_bufs,
188 				    uint32_t descs,
189 				    uint16_t *next_to_clean,
190 				    uint8_t offset);
191 static uint16_t eth_ena_recv_pkts(void *rx_queue,
192 				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
193 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
194 				  struct rte_mbuf *mbuf, uint16_t id);
195 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
196 static void ena_init_rings(struct ena_adapter *adapter,
197 			   bool disable_meta_caching);
198 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
199 static int ena_start(struct rte_eth_dev *dev);
200 static int ena_stop(struct rte_eth_dev *dev);
201 static int ena_close(struct rte_eth_dev *dev);
202 static int ena_dev_reset(struct rte_eth_dev *dev);
203 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
204 static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
205 static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
206 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
207 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
208 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
209 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
210 static int ena_link_update(struct rte_eth_dev *dev,
211 			   int wait_to_complete);
212 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
213 static void ena_queue_stop(struct ena_ring *ring);
214 static void ena_queue_stop_all(struct rte_eth_dev *dev,
215 			      enum ena_ring_type ring_type);
216 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
217 static int ena_queue_start_all(struct rte_eth_dev *dev,
218 			       enum ena_ring_type ring_type);
219 static void ena_stats_restart(struct rte_eth_dev *dev);
220 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
221 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
222 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
223 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
224 static int ena_infos_get(struct rte_eth_dev *dev,
225 			 struct rte_eth_dev_info *dev_info);
226 static void ena_interrupt_handler_rte(void *cb_arg);
227 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
228 static void ena_destroy_device(struct rte_eth_dev *eth_dev);
229 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
230 static int ena_xstats_get_names(struct rte_eth_dev *dev,
231 				struct rte_eth_xstat_name *xstats_names,
232 				unsigned int n);
233 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
234 				      const uint64_t *ids,
235 				      struct rte_eth_xstat_name *xstats_names,
236 				      unsigned int size);
237 static int ena_xstats_get(struct rte_eth_dev *dev,
238 			  struct rte_eth_xstat *stats,
239 			  unsigned int n);
240 static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
241 				const uint64_t *ids,
242 				uint64_t *values,
243 				unsigned int n);
244 static int ena_process_bool_devarg(const char *key,
245 				   const char *value,
246 				   void *opaque);
247 static int ena_parse_devargs(struct ena_adapter *adapter,
248 			     struct rte_devargs *devargs);
249 static int ena_copy_eni_stats(struct ena_adapter *adapter,
250 			      struct ena_stats_eni *stats);
251 static int ena_setup_rx_intr(struct rte_eth_dev *dev);
252 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
253 				    uint16_t queue_id);
254 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
255 				     uint16_t queue_id);
256 static int ena_configure_aenq(struct ena_adapter *adapter);
257 static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
258 				 const void *peer);
259 
260 static const struct eth_dev_ops ena_dev_ops = {
261 	.dev_configure          = ena_dev_configure,
262 	.dev_infos_get          = ena_infos_get,
263 	.rx_queue_setup         = ena_rx_queue_setup,
264 	.tx_queue_setup         = ena_tx_queue_setup,
265 	.dev_start              = ena_start,
266 	.dev_stop               = ena_stop,
267 	.link_update            = ena_link_update,
268 	.stats_get              = ena_stats_get,
269 	.xstats_get_names       = ena_xstats_get_names,
270 	.xstats_get_names_by_id = ena_xstats_get_names_by_id,
271 	.xstats_get             = ena_xstats_get,
272 	.xstats_get_by_id       = ena_xstats_get_by_id,
273 	.mtu_set                = ena_mtu_set,
274 	.rx_queue_release       = ena_rx_queue_release,
275 	.tx_queue_release       = ena_tx_queue_release,
276 	.dev_close              = ena_close,
277 	.dev_reset              = ena_dev_reset,
278 	.reta_update            = ena_rss_reta_update,
279 	.reta_query             = ena_rss_reta_query,
280 	.rx_queue_intr_enable   = ena_rx_queue_intr_enable,
281 	.rx_queue_intr_disable  = ena_rx_queue_intr_disable,
282 	.rss_hash_update        = ena_rss_hash_update,
283 	.rss_hash_conf_get      = ena_rss_hash_conf_get,
284 	.tx_done_cleanup        = ena_tx_cleanup,
285 };
286 
287 /*********************************************************************
288  *  Multi-Process communication bits
289  *********************************************************************/
290 /* rte_mp IPC message name */
291 #define ENA_MP_NAME	"net_ena_mp"
292 /* Request timeout in seconds */
293 #define ENA_MP_REQ_TMO	5
294 
295 /** Proxy request type */
296 enum ena_mp_req {
297 	ENA_MP_DEV_STATS_GET,
298 	ENA_MP_ENI_STATS_GET,
299 	ENA_MP_MTU_SET,
300 	ENA_MP_IND_TBL_GET,
301 	ENA_MP_IND_TBL_SET
302 };
303 
304 /** Proxy message body. Shared between requests and responses. */
305 struct ena_mp_body {
306 	/* Message type */
307 	enum ena_mp_req type;
308 	int port_id;
309 	/* Processing result. Set in replies. 0 if message succeeded, negative
310 	 * error code otherwise.
311 	 */
312 	int result;
313 	union {
314 		int mtu; /* For ENA_MP_MTU_SET */
315 	} args;
316 };
317 
318 /**
319  * Initialize IPC message.
320  *
321  * @param[out] msg
322  *   Pointer to the message to initialize.
323  * @param[in] type
324  *   Message type.
325  * @param[in] port_id
326  *   Port ID of target device.
327  *
328  */
329 static void
330 mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id)
331 {
332 	struct ena_mp_body *body = (struct ena_mp_body *)&msg->param;
333 
334 	memset(msg, 0, sizeof(*msg));
335 	strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name));
336 	msg->len_param = sizeof(*body);
337 	body->type = type;
338 	body->port_id = port_id;
339 }
340 
341 /*********************************************************************
342  *  Multi-Process communication PMD API
343  *********************************************************************/
344 /**
345  * Define proxy request descriptor
346  *
347  * Used to define all structures and functions required for proxying a given
348  * function to the primary process, including the code to prepare the request
349  * and to process the response.
350  *
351  * @param[in] f
352  *   Name of the function to proxy
353  * @param[in] t
354  *   Message type to use
355  * @param[in] prep
356  *   Body of a function to prepare the request in form of a statement
357  *   expression. It is passed all the original function arguments along with two
358  *   extra ones:
359  *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
360  *   - struct ena_mp_body *req - body of a request to prepare.
361  * @param[in] proc
362  *   Body of a function to process the response in form of a statement
363  *   expression. It is passed all the original function arguments along with two
364  *   extra ones:
365  *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
366  *   - struct ena_mp_body *rsp - body of a response to process.
367  * @param ...
368  *   Proxied function's arguments
369  *
370  * @note Inside prep and proc any parameters which aren't used should be marked
371  *       as such (with ENA_TOUCH or __rte_unused).
372  */
373 #define ENA_PROXY_DESC(f, t, prep, proc, ...)			\
374 	static const enum ena_mp_req mp_type_ ## f =  t;	\
375 	static const char *mp_name_ ## f = #t;			\
376 	static void mp_prep_ ## f(struct ena_adapter *adapter,	\
377 				  struct ena_mp_body *req,	\
378 				  __VA_ARGS__)			\
379 	{							\
380 		prep;						\
381 	}							\
382 	static void mp_proc_ ## f(struct ena_adapter *adapter,	\
383 				  struct ena_mp_body *rsp,	\
384 				  __VA_ARGS__)			\
385 	{							\
386 		proc;						\
387 	}
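
/*
 * For example, the ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET, ...)
 * descriptor defined below generates mp_type_ena_com_set_dev_mtu,
 * mp_name_ena_com_set_dev_mtu, mp_prep_ena_com_set_dev_mtu() and
 * mp_proc_ena_com_set_dev_mtu(), which ENA_PROXY() reaches by token pasting
 * on the proxied function name.
 */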
388 
389 /**
390  * Proxy wrapper for calling primary functions in a secondary process.
391  *
392  * Depending on whether called in primary or secondary process, calls the
393  * @p func directly or proxies the call to the primary process via rte_mp IPC.
394  * This macro requires a proxy request descriptor to be defined for @p func
395  * using ENA_PROXY_DESC() macro.
396  *
397  * @param[in/out] a
398  *   Device PMD data. Used for sending the message and sharing message results
399  *   between primary and secondary.
400  * @param[in] f
401  *   Function to proxy.
402  * @param ...
403  *   Arguments of @p func.
404  *
405  * @return
406  *   - 0: Processing succeeded and response handler was called.
407  *   - -EPERM: IPC is unavailable on this platform. This means only the
408  *             primary process may call the proxied function.
409  *   - -EIO:   IPC returned an error on request send. Inspect rte_errno for
410  *             the detailed error code.
411  *   - Negative error code from the proxied function.
412  *
413  * @note This mechanism is geared towards control-path tasks. Avoid calling it
414  *       in fast-path unless unbounded delays are allowed. This is due to the IPC
415  *       mechanism itself (socket based).
416  * @note Due to IPC parameter size limitations the proxy logic shares call
417  *       results through the struct ena_adapter shared memory. This makes the
418  *       proxy mechanism strictly single-threaded. Therefore be sure to make all
419  *       calls to the same proxied function under the same lock.
420  */
421 #define ENA_PROXY(a, f, ...)						\
422 ({									\
423 	struct ena_adapter *_a = (a);					\
424 	struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO };		\
425 	struct ena_mp_body *req, *rsp;					\
426 	struct rte_mp_reply mp_rep;					\
427 	struct rte_mp_msg mp_req;					\
428 	int ret;							\
429 									\
430 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {		\
431 		ret = f(__VA_ARGS__);					\
432 	} else {							\
433 		/* Prepare and send request */				\
434 		req = (struct ena_mp_body *)&mp_req.param;		\
435 		mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \
436 		mp_prep_ ## f(_a, req, ## __VA_ARGS__);			\
437 									\
438 		ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);	\
439 		if (likely(!ret)) {					\
440 			RTE_ASSERT(mp_rep.nb_received == 1);		\
441 			rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \
442 			ret = rsp->result;				\
443 			if (ret == 0) {					\
444 				mp_proc_##f(_a, rsp, ## __VA_ARGS__);	\
445 			} else {					\
446 				PMD_DRV_LOG(ERR,			\
447 					    "%s returned error: %d\n",	\
448 					    mp_name_ ## f, rsp->result);\
449 			}						\
450 			free(mp_rep.msgs);				\
451 		} else if (rte_errno == ENOTSUP) {			\
452 			PMD_DRV_LOG(ERR,				\
453 				    "No IPC, can't proxy to primary\n");\
454 			ret = -rte_errno;				\
455 		} else {						\
456 			PMD_DRV_LOG(ERR, "Request %s failed: %s\n",	\
457 				    mp_name_ ## f,			\
458 				    rte_strerror(rte_errno));		\
459 			ret = -EIO;					\
460 		}							\
461 	}								\
462 	ret;								\
463 })
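
/*
 * Illustrative usage, mirroring the basic-stats call later in this file: in
 * the primary process this collapses to a direct ena_com_get_dev_basic_stats()
 * call, while in a secondary process the request travels over rte_mp IPC. Per
 * the note above, calls are serialized with the adapter's admin lock:
 *
 *	rte_spinlock_lock(&adapter->admin_lock);
 *	rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev,
 *		       &ena_stats);
 *	rte_spinlock_unlock(&adapter->admin_lock);
 */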
464 
465 /*********************************************************************
466  *  Multi-Process communication request descriptors
467  *********************************************************************/
468 
469 ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET,
470 ({
471 	ENA_TOUCH(adapter);
472 	ENA_TOUCH(req);
473 	ENA_TOUCH(ena_dev);
474 	ENA_TOUCH(stats);
475 }),
476 ({
477 	ENA_TOUCH(rsp);
478 	ENA_TOUCH(ena_dev);
479 	if (stats != &adapter->basic_stats)
480 		rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats));
481 }),
482 	struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats);
483 
484 ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET,
485 ({
486 	ENA_TOUCH(adapter);
487 	ENA_TOUCH(req);
488 	ENA_TOUCH(ena_dev);
489 	ENA_TOUCH(stats);
490 }),
491 ({
492 	ENA_TOUCH(rsp);
493 	ENA_TOUCH(ena_dev);
494 	if (stats != (struct ena_admin_eni_stats *)&adapter->eni_stats)
495 		rte_memcpy(stats, &adapter->eni_stats, sizeof(*stats));
496 }),
497 	struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats);
498 
499 ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
500 ({
501 	ENA_TOUCH(adapter);
502 	ENA_TOUCH(ena_dev);
503 	req->args.mtu = mtu;
504 }),
505 ({
506 	ENA_TOUCH(adapter);
507 	ENA_TOUCH(rsp);
508 	ENA_TOUCH(ena_dev);
509 	ENA_TOUCH(mtu);
510 }),
511 	struct ena_com_dev *ena_dev, int mtu);
512 
513 ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET,
514 ({
515 	ENA_TOUCH(adapter);
516 	ENA_TOUCH(req);
517 	ENA_TOUCH(ena_dev);
518 }),
519 ({
520 	ENA_TOUCH(adapter);
521 	ENA_TOUCH(rsp);
522 	ENA_TOUCH(ena_dev);
523 }),
524 	struct ena_com_dev *ena_dev);
525 
526 ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET,
527 ({
528 	ENA_TOUCH(adapter);
529 	ENA_TOUCH(req);
530 	ENA_TOUCH(ena_dev);
531 	ENA_TOUCH(ind_tbl);
532 }),
533 ({
534 	ENA_TOUCH(rsp);
535 	ENA_TOUCH(ena_dev);
536 	if (ind_tbl != adapter->indirect_table)
537 		rte_memcpy(ind_tbl, adapter->indirect_table,
538 			   sizeof(adapter->indirect_table));
539 }),
540 	struct ena_com_dev *ena_dev, u32 *ind_tbl);
541 
542 static inline void ena_trigger_reset(struct ena_adapter *adapter,
543 				     enum ena_regs_reset_reason_types reason)
544 {
545 	if (likely(!adapter->trigger_reset)) {
546 		adapter->reset_reason = reason;
547 		adapter->trigger_reset = true;
548 	}
549 }
550 
551 static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
552 				       struct rte_mbuf *mbuf,
553 				       struct ena_com_rx_ctx *ena_rx_ctx,
554 				       bool fill_hash)
555 {
556 	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
557 	uint64_t ol_flags = 0;
558 	uint32_t packet_type = 0;
559 
560 	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
561 		packet_type |= RTE_PTYPE_L4_TCP;
562 	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
563 		packet_type |= RTE_PTYPE_L4_UDP;
564 
565 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
566 		packet_type |= RTE_PTYPE_L3_IPV4;
567 		if (unlikely(ena_rx_ctx->l3_csum_err)) {
568 			++rx_stats->l3_csum_bad;
569 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
570 		} else {
571 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
572 		}
573 	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
574 		packet_type |= RTE_PTYPE_L3_IPV6;
575 	}
576 
577 	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
578 		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
579 	} else {
580 		if (unlikely(ena_rx_ctx->l4_csum_err)) {
581 			++rx_stats->l4_csum_bad;
582 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
583 		} else {
584 			++rx_stats->l4_csum_good;
585 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
586 		}
587 	}
588 
589 	if (fill_hash &&
590 	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
591 		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
592 		mbuf->hash.rss = ena_rx_ctx->hash;
593 	}
594 
595 	mbuf->ol_flags = ol_flags;
596 	mbuf->packet_type = packet_type;
597 }
598 
599 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
600 				       struct ena_com_tx_ctx *ena_tx_ctx,
601 				       uint64_t queue_offloads,
602 				       bool disable_meta_caching)
603 {
604 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
605 
606 	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
607 	    (queue_offloads & QUEUE_OFFLOADS)) {
608 		/* check if TSO is required */
609 		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
610 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
611 			ena_tx_ctx->tso_enable = true;
612 
613 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
614 		}
615 
616 		/* check if L3 checksum is needed */
617 		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
618 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
619 			ena_tx_ctx->l3_csum_enable = true;
620 
621 		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
622 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
623 		} else {
624 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
625 
626 			/* set don't fragment (DF) flag */
627 			if (mbuf->packet_type &
628 				(RTE_PTYPE_L4_NONFRAG
629 				 | RTE_PTYPE_INNER_L4_NONFRAG))
630 				ena_tx_ctx->df = true;
631 		}
632 
633 		/* check if L4 checksum is needed */
634 		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
635 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
636 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
637 			ena_tx_ctx->l4_csum_enable = true;
638 		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
639 				RTE_MBUF_F_TX_UDP_CKSUM) &&
640 				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
641 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
642 			ena_tx_ctx->l4_csum_enable = true;
643 		} else {
644 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
645 			ena_tx_ctx->l4_csum_enable = false;
646 		}
647 
648 		ena_meta->mss = mbuf->tso_segsz;
649 		ena_meta->l3_hdr_len = mbuf->l3_len;
650 		ena_meta->l3_hdr_offset = mbuf->l2_len;
651 
652 		ena_tx_ctx->meta_valid = true;
653 	} else if (disable_meta_caching) {
654 		memset(ena_meta, 0, sizeof(*ena_meta));
655 		ena_tx_ctx->meta_valid = true;
656 	} else {
657 		ena_tx_ctx->meta_valid = false;
658 	}
659 }
660 
661 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
662 {
663 	struct ena_tx_buffer *tx_info = NULL;
664 
665 	if (likely(req_id < tx_ring->ring_size)) {
666 		tx_info = &tx_ring->tx_buffer_info[req_id];
667 		if (likely(tx_info->mbuf))
668 			return 0;
669 	}
670 
671 	if (tx_info)
672 		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf\n");
673 	else
674 		PMD_TX_LOG(ERR, "Invalid req_id: %hu\n", req_id);
675 
676 	/* Trigger device reset */
677 	++tx_ring->tx_stats.bad_req_id;
678 	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
679 	return -EFAULT;
680 }
681 
682 static void ena_config_host_info(struct ena_com_dev *ena_dev)
683 {
684 	struct ena_admin_host_info *host_info;
685 	int rc;
686 
687 	/* Allocate only the host info */
688 	rc = ena_com_allocate_host_info(ena_dev);
689 	if (rc) {
690 		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
691 		return;
692 	}
693 
694 	host_info = ena_dev->host_attr.host_info;
695 
696 	host_info->os_type = ENA_ADMIN_OS_DPDK;
697 	host_info->kernel_ver = RTE_VERSION;
698 	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
699 		sizeof(host_info->kernel_ver_str));
700 	host_info->os_dist = RTE_VERSION;
701 	strlcpy((char *)host_info->os_dist_str, rte_version(),
702 		sizeof(host_info->os_dist_str));
703 	host_info->driver_version =
704 		(DRV_MODULE_VER_MAJOR) |
705 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
706 		(DRV_MODULE_VER_SUBMINOR <<
707 			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
708 	host_info->num_cpus = rte_lcore_count();
709 
710 	host_info->driver_supported_features =
711 		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
712 		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
713 
714 	rc = ena_com_set_host_attributes(ena_dev);
715 	if (rc) {
716 		if (rc == -ENA_COM_UNSUPPORTED)
717 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
718 		else
719 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
720 
721 		goto err;
722 	}
723 
724 	return;
725 
726 err:
727 	ena_com_delete_host_info(ena_dev);
728 }
729 
730 /* This function calculates the number of xstats based on the current config */
731 static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
732 {
733 	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
734 		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
735 		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
736 }
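
/*
 * For example, a port configured with 2 Rx and 2 Tx queues exposes
 * 4 (global) + 5 (ENI) + 2 * 8 (Tx) + 2 * 9 (Rx) = 43 xstats.
 */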
737 
738 static void ena_config_debug_area(struct ena_adapter *adapter)
739 {
740 	u32 debug_area_size;
741 	int rc, ss_count;
742 
743 	ss_count = ena_xstats_calc_num(adapter->edev_data);
744 
745 	/* Allocate 32 bytes for each string and 64 bits for each value */
746 	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
747 
748 	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
749 	if (rc) {
750 		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
751 		return;
752 	}
753 
754 	rc = ena_com_set_host_attributes(&adapter->ena_dev);
755 	if (rc) {
756 		if (rc == -ENA_COM_UNSUPPORTED)
757 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
758 		else
759 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
760 
761 		goto err;
762 	}
763 
764 	return;
765 err:
766 	ena_com_delete_debug_area(&adapter->ena_dev);
767 }
768 
769 static int ena_close(struct rte_eth_dev *dev)
770 {
771 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
772 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
773 	struct ena_adapter *adapter = dev->data->dev_private;
774 	int ret = 0;
775 
776 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
777 		return 0;
778 
779 	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
780 		ret = ena_stop(dev);
781 	adapter->state = ENA_ADAPTER_STATE_CLOSED;
782 
783 	ena_rx_queue_release_all(dev);
784 	ena_tx_queue_release_all(dev);
785 
786 	rte_free(adapter->drv_stats);
787 	adapter->drv_stats = NULL;
788 
789 	rte_intr_disable(intr_handle);
790 	rte_intr_callback_unregister(intr_handle,
791 				     ena_interrupt_handler_rte,
792 				     dev);
793 
794 	/*
795 	 * MAC is not allocated dynamically. Setting it to NULL prevents the
796 	 * resource from being released in rte_eth_dev_release_port().
797 	 */
798 	dev->data->mac_addrs = NULL;
799 
800 	return ret;
801 }
802 
803 static int
804 ena_dev_reset(struct rte_eth_dev *dev)
805 {
806 	int rc = 0;
807 
808 	/* Cannot release memory in secondary process */
809 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
810 		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
811 		return -EPERM;
812 	}
813 
814 	ena_destroy_device(dev);
815 	rc = eth_ena_dev_init(dev);
816 	if (rc)
817 		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
818 
819 	return rc;
820 }
821 
822 static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
823 {
824 	int nb_queues = dev->data->nb_rx_queues;
825 	int i;
826 
827 	for (i = 0; i < nb_queues; i++)
828 		ena_rx_queue_release(dev, i);
829 }
830 
831 static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
832 {
833 	int nb_queues = dev->data->nb_tx_queues;
834 	int i;
835 
836 	for (i = 0; i < nb_queues; i++)
837 		ena_tx_queue_release(dev, i);
838 }
839 
840 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
841 {
842 	struct ena_ring *ring = dev->data->rx_queues[qid];
843 
844 	/* Free ring resources */
845 	rte_free(ring->rx_buffer_info);
846 	ring->rx_buffer_info = NULL;
847 
848 	rte_free(ring->rx_refill_buffer);
849 	ring->rx_refill_buffer = NULL;
850 
851 	rte_free(ring->empty_rx_reqs);
852 	ring->empty_rx_reqs = NULL;
853 
854 	ring->configured = 0;
855 
856 	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
857 		ring->port_id, ring->id);
858 }
859 
860 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
861 {
862 	struct ena_ring *ring = dev->data->tx_queues[qid];
863 
864 	/* Free ring resources */
865 	rte_free(ring->push_buf_intermediate_buf);
866 
867 	rte_free(ring->tx_buffer_info);
868 
869 	rte_free(ring->empty_tx_reqs);
870 
871 	ring->empty_tx_reqs = NULL;
872 	ring->tx_buffer_info = NULL;
873 	ring->push_buf_intermediate_buf = NULL;
874 
875 	ring->configured = 0;
876 
877 	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
878 		ring->port_id, ring->id);
879 }
880 
881 static void ena_rx_queue_release_bufs(struct ena_ring *ring)
882 {
883 	unsigned int i;
884 
885 	for (i = 0; i < ring->ring_size; ++i) {
886 		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
887 		if (rx_info->mbuf) {
888 			rte_mbuf_raw_free(rx_info->mbuf);
889 			rx_info->mbuf = NULL;
890 		}
891 	}
892 }
893 
894 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
895 {
896 	unsigned int i;
897 
898 	for (i = 0; i < ring->ring_size; ++i) {
899 		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
900 
901 		if (tx_buf->mbuf) {
902 			rte_pktmbuf_free(tx_buf->mbuf);
903 			tx_buf->mbuf = NULL;
904 		}
905 	}
906 }
907 
908 static int ena_link_update(struct rte_eth_dev *dev,
909 			   __rte_unused int wait_to_complete)
910 {
911 	struct rte_eth_link *link = &dev->data->dev_link;
912 	struct ena_adapter *adapter = dev->data->dev_private;
913 
914 	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
915 	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
916 	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
917 
918 	return 0;
919 }
920 
921 static int ena_queue_start_all(struct rte_eth_dev *dev,
922 			       enum ena_ring_type ring_type)
923 {
924 	struct ena_adapter *adapter = dev->data->dev_private;
925 	struct ena_ring *queues = NULL;
926 	int nb_queues;
927 	int i = 0;
928 	int rc = 0;
929 
930 	if (ring_type == ENA_RING_TYPE_RX) {
931 		queues = adapter->rx_ring;
932 		nb_queues = dev->data->nb_rx_queues;
933 	} else {
934 		queues = adapter->tx_ring;
935 		nb_queues = dev->data->nb_tx_queues;
936 	}
937 	for (i = 0; i < nb_queues; i++) {
938 		if (queues[i].configured) {
939 			if (ring_type == ENA_RING_TYPE_RX) {
940 				ena_assert_msg(
941 					dev->data->rx_queues[i] == &queues[i],
942 					"Inconsistent state of Rx queues\n");
943 			} else {
944 				ena_assert_msg(
945 					dev->data->tx_queues[i] == &queues[i],
946 					"Inconsistent state of Tx queues\n");
947 			}
948 
949 			rc = ena_queue_start(dev, &queues[i]);
950 
951 			if (rc) {
952 				PMD_INIT_LOG(ERR,
953 					"Failed to start queue[%d] of type(%d)\n",
954 					i, ring_type);
955 				goto err;
956 			}
957 		}
958 	}
959 
960 	return 0;
961 
962 err:
963 	while (i--)
964 		if (queues[i].configured)
965 			ena_queue_stop(&queues[i]);
966 
967 	return rc;
968 }
969 
970 static int ena_check_valid_conf(struct ena_adapter *adapter)
971 {
972 	uint32_t mtu = adapter->edev_data->mtu;
973 
974 	if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
975 		PMD_INIT_LOG(ERR,
976 			"Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
977 			mtu, adapter->max_mtu, ENA_MIN_MTU);
978 		return ENA_COM_UNSUPPORTED;
979 	}
980 
981 	return 0;
982 }
983 
984 static int
985 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
986 		       bool use_large_llq_hdr)
987 {
988 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
989 	struct ena_com_dev *ena_dev = ctx->ena_dev;
990 	uint32_t max_tx_queue_size;
991 	uint32_t max_rx_queue_size;
992 
993 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
994 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
995 			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
996 		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
997 			max_queue_ext->max_rx_sq_depth);
998 		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
999 
1000 		if (ena_dev->tx_mem_queue_type ==
1001 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1002 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1003 				llq->max_llq_depth);
1004 		} else {
1005 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1006 				max_queue_ext->max_tx_sq_depth);
1007 		}
1008 
1009 		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1010 			max_queue_ext->max_per_packet_rx_descs);
1011 		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1012 			max_queue_ext->max_per_packet_tx_descs);
1013 	} else {
1014 		struct ena_admin_queue_feature_desc *max_queues =
1015 			&ctx->get_feat_ctx->max_queues;
1016 		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
1017 			max_queues->max_sq_depth);
1018 		max_tx_queue_size = max_queues->max_cq_depth;
1019 
1020 		if (ena_dev->tx_mem_queue_type ==
1021 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1022 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1023 				llq->max_llq_depth);
1024 		} else {
1025 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1026 				max_queues->max_sq_depth);
1027 		}
1028 
1029 		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1030 			max_queues->max_packet_rx_descs);
1031 		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1032 			max_queues->max_packet_tx_descs);
1033 	}
1034 
1035 	/* Round down to the nearest power of 2 */
1036 	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
1037 	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);
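	/*
	 * E.g. rte_align32prevpow2(1000) yields 512; values that already are a
	 * power of 2 are returned unchanged.
	 */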
1038 
1039 	if (use_large_llq_hdr) {
1040 		if ((llq->entry_size_ctrl_supported &
1041 		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
1042 		    (ena_dev->tx_mem_queue_type ==
1043 		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
1044 			max_tx_queue_size /= 2;
1045 			PMD_INIT_LOG(INFO,
1046 				"Forcing large headers and decreasing maximum Tx queue size to %d\n",
1047 				max_tx_queue_size);
1048 		} else {
1049 			PMD_INIT_LOG(ERR,
1050 				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
1051 		}
1052 	}
1053 
1054 	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
1055 		PMD_INIT_LOG(ERR, "Invalid queue size\n");
1056 		return -EFAULT;
1057 	}
1058 
1059 	ctx->max_tx_queue_size = max_tx_queue_size;
1060 	ctx->max_rx_queue_size = max_rx_queue_size;
1061 
1062 	return 0;
1063 }
1064 
1065 static void ena_stats_restart(struct rte_eth_dev *dev)
1066 {
1067 	struct ena_adapter *adapter = dev->data->dev_private;
1068 
1069 	rte_atomic64_init(&adapter->drv_stats->ierrors);
1070 	rte_atomic64_init(&adapter->drv_stats->oerrors);
1071 	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
1072 	adapter->drv_stats->rx_drops = 0;
1073 }
1074 
1075 static int ena_stats_get(struct rte_eth_dev *dev,
1076 			  struct rte_eth_stats *stats)
1077 {
1078 	struct ena_admin_basic_stats ena_stats;
1079 	struct ena_adapter *adapter = dev->data->dev_private;
1080 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1081 	int rc;
1082 	int i;
1083 	int max_rings_stats;
1084 
1085 	memset(&ena_stats, 0, sizeof(ena_stats));
1086 
1087 	rte_spinlock_lock(&adapter->admin_lock);
1088 	rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev,
1089 		       &ena_stats);
1090 	rte_spinlock_unlock(&adapter->admin_lock);
1091 	if (unlikely(rc)) {
1092 		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
1093 		return rc;
1094 	}
1095 
1096 	/* Set of basic statistics from ENA */
1097 	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
1098 					  ena_stats.rx_pkts_low);
1099 	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
1100 					  ena_stats.tx_pkts_low);
1101 	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
1102 					ena_stats.rx_bytes_low);
1103 	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
1104 					ena_stats.tx_bytes_low);
1105 
1106 	/* Driver related stats */
1107 	stats->imissed = adapter->drv_stats->rx_drops;
1108 	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
1109 	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
1110 	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
1111 
1112 	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
1113 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
1114 	for (i = 0; i < max_rings_stats; ++i) {
1115 		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;
1116 
1117 		stats->q_ibytes[i] = rx_stats->bytes;
1118 		stats->q_ipackets[i] = rx_stats->cnt;
1119 		stats->q_errors[i] = rx_stats->bad_desc_num +
1120 			rx_stats->bad_req_id;
1121 	}
1122 
1123 	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
1124 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
1125 	for (i = 0; i < max_rings_stats; ++i) {
1126 		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;
1127 
1128 		stats->q_obytes[i] = tx_stats->bytes;
1129 		stats->q_opackets[i] = tx_stats->cnt;
1130 	}
1131 
1132 	return 0;
1133 }
1134 
1135 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1136 {
1137 	struct ena_adapter *adapter;
1138 	struct ena_com_dev *ena_dev;
1139 	int rc = 0;
1140 
1141 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
1142 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
1143 	adapter = dev->data->dev_private;
1144 
1145 	ena_dev = &adapter->ena_dev;
1146 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
1147 
1148 	if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
1149 		PMD_DRV_LOG(ERR,
1150 			"Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n",
1151 			mtu, adapter->max_mtu, ENA_MIN_MTU);
1152 		return -EINVAL;
1153 	}
1154 
1155 	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
1156 	if (rc)
1157 		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
1158 	else
1159 		PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu);
1160 
1161 	return rc;
1162 }
1163 
1164 static int ena_start(struct rte_eth_dev *dev)
1165 {
1166 	struct ena_adapter *adapter = dev->data->dev_private;
1167 	uint64_t ticks;
1168 	int rc = 0;
1169 
1170 	/* Cannot allocate memory in secondary process */
1171 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1172 		PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n");
1173 		return -EPERM;
1174 	}
1175 
1176 	rc = ena_check_valid_conf(adapter);
1177 	if (rc)
1178 		return rc;
1179 
1180 	rc = ena_setup_rx_intr(dev);
1181 	if (rc)
1182 		return rc;
1183 
1184 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
1185 	if (rc)
1186 		return rc;
1187 
1188 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
1189 	if (rc)
1190 		goto err_start_tx;
1191 
1192 	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
1193 		rc = ena_rss_configure(adapter);
1194 		if (rc)
1195 			goto err_rss_init;
1196 	}
1197 
1198 	ena_stats_restart(dev);
1199 
1200 	adapter->timestamp_wd = rte_get_timer_cycles();
1201 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1202 
1203 	ticks = rte_get_timer_hz();
1204 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1205 			ena_timer_wd_callback, dev);
1206 
1207 	++adapter->dev_stats.dev_start;
1208 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
1209 
1210 	return 0;
1211 
1212 err_rss_init:
1213 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
1214 err_start_tx:
1215 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1216 	return rc;
1217 }
1218 
1219 static int ena_stop(struct rte_eth_dev *dev)
1220 {
1221 	struct ena_adapter *adapter = dev->data->dev_private;
1222 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1223 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1224 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1225 	int rc;
1226 
1227 	/* Cannot free memory in secondary process */
1228 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1229 		PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n");
1230 		return -EPERM;
1231 	}
1232 
1233 	rte_timer_stop_sync(&adapter->timer_wd);
1234 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
1235 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1236 
1237 	if (adapter->trigger_reset) {
1238 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1239 		if (rc)
1240 			PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
1241 	}
1242 
1243 	rte_intr_disable(intr_handle);
1244 
1245 	rte_intr_efd_disable(intr_handle);
1246 
1247 	/* Cleanup vector list */
1248 	rte_intr_vec_list_free(intr_handle);
1249 
1250 	rte_intr_enable(intr_handle);
1251 
1252 	++adapter->dev_stats.dev_stop;
1253 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1254 	dev->data->dev_started = 0;
1255 
1256 	return 0;
1257 }
1258 
1259 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
1260 {
1261 	struct ena_adapter *adapter = ring->adapter;
1262 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1263 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1264 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1265 	struct ena_com_create_io_ctx ctx =
1266 		/* policy set to _HOST just to satisfy icc compiler */
1267 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1268 		  0, 0, 0, 0, 0 };
1269 	uint16_t ena_qid;
1270 	unsigned int i;
1271 	int rc;
1272 
1273 	ctx.msix_vector = -1;
1274 	if (ring->type == ENA_RING_TYPE_TX) {
1275 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1276 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1277 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1278 		for (i = 0; i < ring->ring_size; i++)
1279 			ring->empty_tx_reqs[i] = i;
1280 	} else {
1281 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1282 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1283 		if (rte_intr_dp_is_en(intr_handle))
1284 			ctx.msix_vector =
1285 				rte_intr_vec_list_index_get(intr_handle,
1286 								   ring->id);
1287 
1288 		for (i = 0; i < ring->ring_size; i++)
1289 			ring->empty_rx_reqs[i] = i;
1290 	}
1291 	ctx.queue_size = ring->ring_size;
1292 	ctx.qid = ena_qid;
1293 	ctx.numa_node = ring->numa_socket_id;
1294 
1295 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1296 	if (rc) {
1297 		PMD_DRV_LOG(ERR,
1298 			"Failed to create IO queue[%d] (qid:%d), rc: %d\n",
1299 			ring->id, ena_qid, rc);
1300 		return rc;
1301 	}
1302 
1303 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1304 				     &ring->ena_com_io_sq,
1305 				     &ring->ena_com_io_cq);
1306 	if (rc) {
1307 		PMD_DRV_LOG(ERR,
1308 			"Failed to get IO queue[%d] handlers, rc: %d\n",
1309 			ring->id, rc);
1310 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1311 		return rc;
1312 	}
1313 
1314 	if (ring->type == ENA_RING_TYPE_TX)
1315 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1316 
1317 	/* Start with Rx interrupts being masked. */
1318 	if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle))
1319 		ena_rx_queue_intr_disable(dev, ring->id);
1320 
1321 	return 0;
1322 }
1323 
1324 static void ena_queue_stop(struct ena_ring *ring)
1325 {
1326 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1327 
1328 	if (ring->type == ENA_RING_TYPE_RX) {
1329 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
1330 		ena_rx_queue_release_bufs(ring);
1331 	} else {
1332 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
1333 		ena_tx_queue_release_bufs(ring);
1334 	}
1335 }
1336 
1337 static void ena_queue_stop_all(struct rte_eth_dev *dev,
1338 			      enum ena_ring_type ring_type)
1339 {
1340 	struct ena_adapter *adapter = dev->data->dev_private;
1341 	struct ena_ring *queues = NULL;
1342 	uint16_t nb_queues, i;
1343 
1344 	if (ring_type == ENA_RING_TYPE_RX) {
1345 		queues = adapter->rx_ring;
1346 		nb_queues = dev->data->nb_rx_queues;
1347 	} else {
1348 		queues = adapter->tx_ring;
1349 		nb_queues = dev->data->nb_tx_queues;
1350 	}
1351 
1352 	for (i = 0; i < nb_queues; ++i)
1353 		if (queues[i].configured)
1354 			ena_queue_stop(&queues[i]);
1355 }
1356 
1357 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
1358 {
1359 	int rc, bufs_num;
1360 
1361 	ena_assert_msg(ring->configured == 1,
1362 		       "Trying to start unconfigured queue\n");
1363 
1364 	rc = ena_create_io_queue(dev, ring);
1365 	if (rc) {
1366 		PMD_INIT_LOG(ERR, "Failed to create IO queue\n");
1367 		return rc;
1368 	}
1369 
1370 	ring->next_to_clean = 0;
1371 	ring->next_to_use = 0;
1372 
1373 	if (ring->type == ENA_RING_TYPE_TX) {
1374 		ring->tx_stats.available_desc =
1375 			ena_com_free_q_entries(ring->ena_com_io_sq);
1376 		return 0;
1377 	}
1378 
1379 	bufs_num = ring->ring_size - 1;
1380 	rc = ena_populate_rx_queue(ring, bufs_num);
1381 	if (rc != bufs_num) {
1382 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
1383 					 ENA_IO_RXQ_IDX(ring->id));
1384 		PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n");
1385 		return ENA_COM_FAULT;
1386 	}
1387 	/* Flush the per-core Rx mbuf pool caches, as the buffers can be used on
1388 	 * other cores as well.
1389 	 */
1390 	rte_mempool_cache_flush(NULL, ring->mb_pool);
1391 
1392 	return 0;
1393 }
1394 
1395 static int ena_tx_queue_setup(struct rte_eth_dev *dev,
1396 			      uint16_t queue_idx,
1397 			      uint16_t nb_desc,
1398 			      unsigned int socket_id,
1399 			      const struct rte_eth_txconf *tx_conf)
1400 {
1401 	struct ena_ring *txq = NULL;
1402 	struct ena_adapter *adapter = dev->data->dev_private;
1403 	unsigned int i;
1404 	uint16_t dyn_thresh;
1405 
1406 	txq = &adapter->tx_ring[queue_idx];
1407 
1408 	if (txq->configured) {
1409 		PMD_DRV_LOG(CRIT,
1410 			"API violation. Queue[%d] is already configured\n",
1411 			queue_idx);
1412 		return ENA_COM_FAULT;
1413 	}
1414 
1415 	if (!rte_is_power_of_2(nb_desc)) {
1416 		PMD_DRV_LOG(ERR,
1417 			"Unsupported size of Tx queue: %d is not a power of 2.\n",
1418 			nb_desc);
1419 		return -EINVAL;
1420 	}
1421 
1422 	if (nb_desc > adapter->max_tx_ring_size) {
1423 		PMD_DRV_LOG(ERR,
1424 			"Unsupported size of Tx queue (max size: %d)\n",
1425 			adapter->max_tx_ring_size);
1426 		return -EINVAL;
1427 	}
1428 
1429 	txq->port_id = dev->data->port_id;
1430 	txq->next_to_clean = 0;
1431 	txq->next_to_use = 0;
1432 	txq->ring_size = nb_desc;
1433 	txq->size_mask = nb_desc - 1;
1434 	txq->numa_socket_id = socket_id;
1435 	txq->pkts_without_db = false;
1436 	txq->last_cleanup_ticks = 0;
1437 
1438 	txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
1439 		sizeof(struct ena_tx_buffer) * txq->ring_size,
1440 		RTE_CACHE_LINE_SIZE,
1441 		socket_id);
1442 	if (!txq->tx_buffer_info) {
1443 		PMD_DRV_LOG(ERR,
1444 			"Failed to allocate memory for Tx buffer info\n");
1445 		return -ENOMEM;
1446 	}
1447 
1448 	txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
1449 		sizeof(uint16_t) * txq->ring_size,
1450 		RTE_CACHE_LINE_SIZE,
1451 		socket_id);
1452 	if (!txq->empty_tx_reqs) {
1453 		PMD_DRV_LOG(ERR,
1454 			"Failed to allocate memory for empty Tx requests\n");
1455 		rte_free(txq->tx_buffer_info);
1456 		return -ENOMEM;
1457 	}
1458 
1459 	txq->push_buf_intermediate_buf =
1460 		rte_zmalloc_socket("txq->push_buf_intermediate_buf",
1461 			txq->tx_max_header_size,
1462 			RTE_CACHE_LINE_SIZE,
1463 			socket_id);
1464 	if (!txq->push_buf_intermediate_buf) {
1465 		PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
1466 		rte_free(txq->tx_buffer_info);
1467 		rte_free(txq->empty_tx_reqs);
1468 		return -ENOMEM;
1469 	}
1470 
1471 	for (i = 0; i < txq->ring_size; i++)
1472 		txq->empty_tx_reqs[i] = i;
1473 
1474 	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1475 
1476 	/* Check if caller provided the Tx cleanup threshold value. */
1477 	if (tx_conf->tx_free_thresh != 0) {
1478 		txq->tx_free_thresh = tx_conf->tx_free_thresh;
1479 	} else {
1480 		dyn_thresh = txq->ring_size -
1481 			txq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1482 		txq->tx_free_thresh = RTE_MAX(dyn_thresh,
1483 			txq->ring_size - ENA_REFILL_THRESH_PACKET);
1484 	}
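	/*
	 * Worked example, assuming the driver header defines
	 * ENA_REFILL_THRESH_DIVIDER as 8 and ENA_REFILL_THRESH_PACKET as 256:
	 * a 1024-entry ring gets tx_free_thresh =
	 * RTE_MAX(1024 - 1024 / 8, 1024 - 256) = RTE_MAX(896, 768) = 896.
	 */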
1485 
1486 	txq->missing_tx_completion_threshold =
1487 		RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);
1488 
1489 	/* Store pointer to this queue in upper layer */
1490 	txq->configured = 1;
1491 	dev->data->tx_queues[queue_idx] = txq;
1492 
1493 	return 0;
1494 }
1495 
1496 static int ena_rx_queue_setup(struct rte_eth_dev *dev,
1497 			      uint16_t queue_idx,
1498 			      uint16_t nb_desc,
1499 			      unsigned int socket_id,
1500 			      const struct rte_eth_rxconf *rx_conf,
1501 			      struct rte_mempool *mp)
1502 {
1503 	struct ena_adapter *adapter = dev->data->dev_private;
1504 	struct ena_ring *rxq = NULL;
1505 	size_t buffer_size;
1506 	int i;
1507 	uint16_t dyn_thresh;
1508 
1509 	rxq = &adapter->rx_ring[queue_idx];
1510 	if (rxq->configured) {
1511 		PMD_DRV_LOG(CRIT,
1512 			"API violation. Queue[%d] is already configured\n",
1513 			queue_idx);
1514 		return ENA_COM_FAULT;
1515 	}
1516 
1517 	if (!rte_is_power_of_2(nb_desc)) {
1518 		PMD_DRV_LOG(ERR,
1519 			"Unsupported size of Rx queue: %d is not a power of 2.\n",
1520 			nb_desc);
1521 		return -EINVAL;
1522 	}
1523 
1524 	if (nb_desc > adapter->max_rx_ring_size) {
1525 		PMD_DRV_LOG(ERR,
1526 			"Unsupported size of Rx queue (max size: %d)\n",
1527 			adapter->max_rx_ring_size);
1528 		return -EINVAL;
1529 	}
1530 
1531 	/* ENA doesn't support Rx buffers smaller than ENA_RX_BUF_MIN_SIZE (1400 bytes) */
1532 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
1533 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
1534 		PMD_DRV_LOG(ERR,
1535 			"Unsupported size of Rx buffer: %zu (min size: %d)\n",
1536 			buffer_size, ENA_RX_BUF_MIN_SIZE);
1537 		return -EINVAL;
1538 	}
1539 
1540 	rxq->port_id = dev->data->port_id;
1541 	rxq->next_to_clean = 0;
1542 	rxq->next_to_use = 0;
1543 	rxq->ring_size = nb_desc;
1544 	rxq->size_mask = nb_desc - 1;
1545 	rxq->numa_socket_id = socket_id;
1546 	rxq->mb_pool = mp;
1547 
1548 	rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
1549 		sizeof(struct ena_rx_buffer) * nb_desc,
1550 		RTE_CACHE_LINE_SIZE,
1551 		socket_id);
1552 	if (!rxq->rx_buffer_info) {
1553 		PMD_DRV_LOG(ERR,
1554 			"Failed to allocate memory for Rx buffer info\n");
1555 		return -ENOMEM;
1556 	}
1557 
1558 	rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
1559 		sizeof(struct rte_mbuf *) * nb_desc,
1560 		RTE_CACHE_LINE_SIZE,
1561 		socket_id);
1562 	if (!rxq->rx_refill_buffer) {
1563 		PMD_DRV_LOG(ERR,
1564 			"Failed to allocate memory for Rx refill buffer\n");
1565 		rte_free(rxq->rx_buffer_info);
1566 		rxq->rx_buffer_info = NULL;
1567 		return -ENOMEM;
1568 	}
1569 
1570 	rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
1571 		sizeof(uint16_t) * nb_desc,
1572 		RTE_CACHE_LINE_SIZE,
1573 		socket_id);
1574 	if (!rxq->empty_rx_reqs) {
1575 		PMD_DRV_LOG(ERR,
1576 			"Failed to allocate memory for empty Rx requests\n");
1577 		rte_free(rxq->rx_buffer_info);
1578 		rxq->rx_buffer_info = NULL;
1579 		rte_free(rxq->rx_refill_buffer);
1580 		rxq->rx_refill_buffer = NULL;
1581 		return -ENOMEM;
1582 	}
1583 
1584 	for (i = 0; i < nb_desc; i++)
1585 		rxq->empty_rx_reqs[i] = i;
1586 
1587 	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1588 
1589 	if (rx_conf->rx_free_thresh != 0) {
1590 		rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1591 	} else {
1592 		dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1593 		rxq->rx_free_thresh = RTE_MIN(dyn_thresh,
1594 			(uint16_t)(ENA_REFILL_THRESH_PACKET));
1595 	}
1596 
1597 	/* Store pointer to this queue in upper layer */
1598 	rxq->configured = 1;
1599 	dev->data->rx_queues[queue_idx] = rxq;
1600 
1601 	return 0;
1602 }
1603 
1604 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
1605 				  struct rte_mbuf *mbuf, uint16_t id)
1606 {
1607 	struct ena_com_buf ebuf;
1608 	int rc;
1609 
1610 	/* prepare physical address for DMA transaction */
1611 	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
1612 	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
1613 
1614 	/* pass resource to device */
1615 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
1616 	if (unlikely(rc != 0))
1617 		PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");
1618 
1619 	return rc;
1620 }
1621 
1622 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
1623 {
1624 	unsigned int i;
1625 	int rc;
1626 	uint16_t next_to_use = rxq->next_to_use;
1627 	uint16_t req_id;
1628 #ifdef RTE_ETHDEV_DEBUG_RX
1629 	uint16_t in_use;
1630 #endif
1631 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
1632 
1633 	if (unlikely(!count))
1634 		return 0;
1635 
1636 #ifdef RTE_ETHDEV_DEBUG_RX
1637 	in_use = rxq->ring_size - 1 -
1638 		ena_com_free_q_entries(rxq->ena_com_io_sq);
1639 	if (unlikely((in_use + count) >= rxq->ring_size))
1640 		PMD_RX_LOG(ERR, "Bad Rx ring state\n");
1641 #endif
1642 
1643 	/* get resources for incoming packets */
1644 	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
1645 	if (unlikely(rc < 0)) {
1646 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
1647 		++rxq->rx_stats.mbuf_alloc_fail;
1648 		PMD_RX_LOG(DEBUG, "There are not enough free buffers\n");
1649 		return 0;
1650 	}
1651 
1652 	for (i = 0; i < count; i++) {
1653 		struct rte_mbuf *mbuf = mbufs[i];
1654 		struct ena_rx_buffer *rx_info;
1655 
1656 		if (likely((i + 4) < count))
1657 			rte_prefetch0(mbufs[i + 4]);
1658 
1659 		req_id = rxq->empty_rx_reqs[next_to_use];
1660 		rx_info = &rxq->rx_buffer_info[req_id];
1661 
1662 		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
1663 		if (unlikely(rc != 0))
1664 			break;
1665 
1666 		rx_info->mbuf = mbuf;
1667 		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
1668 	}
1669 
1670 	if (unlikely(i < count)) {
1671 		PMD_RX_LOG(WARNING,
1672 			"Refilled Rx queue[%d] with only %d/%d buffers\n",
1673 			rxq->id, i, count);
1674 		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
1675 		++rxq->rx_stats.refill_partial;
1676 	}
1677 
1678 	/* When we submitted free resources to device... */
1679 	if (likely(i > 0)) {
1680 		/* ...let HW know that it can fill buffers with data. */
1681 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
1682 
1683 		rxq->next_to_use = next_to_use;
1684 	}
1685 
1686 	return i;
1687 }
1688 
1689 static int ena_device_init(struct ena_adapter *adapter,
1690 			   struct rte_pci_device *pdev,
1691 			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
1692 {
1693 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1694 	uint32_t aenq_groups;
1695 	int rc;
1696 	bool readless_supported;
1697 
1698 	/* Initialize mmio registers */
1699 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
1700 	if (rc) {
1701 		PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n");
1702 		return rc;
1703 	}
1704 
1705 	/* The PCIe configuration space revision ID indicates whether MMIO register
1706 	 * read is disabled.
1707 	 */
1708 	readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ);
1709 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1710 
1711 	/* reset device */
1712 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
1713 	if (rc) {
1714 		PMD_DRV_LOG(ERR, "Cannot reset device\n");
1715 		goto err_mmio_read_less;
1716 	}
1717 
1718 	/* check FW version */
1719 	rc = ena_com_validate_version(ena_dev);
1720 	if (rc) {
1721 		PMD_DRV_LOG(ERR, "Device version is too low\n");
1722 		goto err_mmio_read_less;
1723 	}
1724 
1725 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
1726 
1727 	/* ENA device administration layer init */
1728 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
1729 	if (rc) {
1730 		PMD_DRV_LOG(ERR,
1731 			"Cannot initialize ENA admin queue\n");
1732 		goto err_mmio_read_less;
1733 	}
1734 
1735 	/* To enable the MSI-X interrupts, the driver needs to know the number
1736 	 * of queues, so it uses polling mode to retrieve this
1737 	 * information.
1738 	 */
1739 	ena_com_set_admin_polling_mode(ena_dev, true);
1740 
1741 	ena_config_host_info(ena_dev);
1742 
1743 	/* Get Device Attributes and features */
1744 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
1745 	if (rc) {
1746 		PMD_DRV_LOG(ERR,
1747 			"Cannot get attribute for ENA device, rc: %d\n", rc);
1748 		goto err_admin_init;
1749 	}
1750 
1751 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1752 		      BIT(ENA_ADMIN_NOTIFICATION) |
1753 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1754 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1755 		      BIT(ENA_ADMIN_WARNING);
1756 
1757 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1758 
1759 	adapter->all_aenq_groups = aenq_groups;
1760 
1761 	return 0;
1762 
1763 err_admin_init:
1764 	ena_com_admin_destroy(ena_dev);
1765 
1766 err_mmio_read_less:
1767 	ena_com_mmio_reg_read_request_destroy(ena_dev);
1768 
1769 	return rc;
1770 }
1771 
1772 static void ena_interrupt_handler_rte(void *cb_arg)
1773 {
1774 	struct rte_eth_dev *dev = cb_arg;
1775 	struct ena_adapter *adapter = dev->data->dev_private;
1776 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1777 
1778 	ena_com_admin_q_comp_intr_handler(ena_dev);
1779 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1780 		ena_com_aenq_intr_handler(ena_dev, dev);
1781 }
1782 
1783 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
1784 {
1785 	if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)))
1786 		return;
1787 
1788 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
1789 		return;
1790 
1791 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
1792 	    adapter->keep_alive_timeout)) {
1793 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
1794 		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
1795 		++adapter->dev_stats.wd_expired;
1796 	}
1797 }
1798 
1799 /* Check if admin queue is enabled */
1800 static void check_for_admin_com_state(struct ena_adapter *adapter)
1801 {
1802 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
1803 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n");
1804 		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
1805 	}
1806 }
1807 
1808 static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
1809 					    struct ena_ring *tx_ring)
1810 {
1811 	struct ena_tx_buffer *tx_buf;
1812 	uint64_t timestamp;
1813 	uint64_t completion_delay;
1814 	uint32_t missed_tx = 0;
1815 	unsigned int i;
1816 	int rc = 0;
1817 
1818 	for (i = 0; i < tx_ring->ring_size; ++i) {
1819 		tx_buf = &tx_ring->tx_buffer_info[i];
1820 		timestamp = tx_buf->timestamp;
1821 
1822 		if (timestamp == 0)
1823 			continue;
1824 
1825 		completion_delay = rte_get_timer_cycles() - timestamp;
1826 		if (completion_delay > adapter->missing_tx_completion_to) {
1827 			if (unlikely(!tx_buf->print_once)) {
1828 				PMD_TX_LOG(WARNING,
1829 					"Found a Tx that wasn't completed on time, qid %d, index %d. "
1830 					"Missing Tx outstanding for %" PRIu64 " msecs.\n",
1831 					tx_ring->id, i, completion_delay *
1832 					1000 / rte_get_timer_hz());
1833 				tx_buf->print_once = true;
1834 			}
1835 			++missed_tx;
1836 		}
1837 	}
1838 
1839 	if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
1840 		PMD_DRV_LOG(ERR,
1841 			"The number of lost Tx completions is above the threshold (%d > %d). "
1842 			"Trigger the device reset.\n",
1843 			missed_tx,
1844 			tx_ring->missing_tx_completion_threshold);
1845 		adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
1846 		adapter->trigger_reset = true;
1847 		rc = -EIO;
1848 	}
1849 
1850 	tx_ring->tx_stats.missed_tx += missed_tx;
1851 
1852 	return rc;
1853 }
1854 
1855 static void check_for_tx_completions(struct ena_adapter *adapter)
1856 {
1857 	struct ena_ring *tx_ring;
1858 	uint64_t tx_cleanup_delay;
1859 	size_t qid;
1860 	int budget;
1861 	uint16_t nb_tx_queues;
1862 
1863 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
1864 		return;
1865 
1866 	nb_tx_queues = adapter->edev_data->nb_tx_queues;
1867 	budget = adapter->missing_tx_completion_budget;
1868 
1869 	qid = adapter->last_tx_comp_qid;
1870 	while (budget-- > 0) {
1871 		tx_ring = &adapter->tx_ring[qid];
1872 
1873 		/* Tx cleanup is called only by the Tx burst function or
1874 		 * dynamically by the application, and it is limited by the
1875 		 * free threshold. To avoid false detection of missing HW Tx
1876 		 * completions, check the delay since the cleanup function was
1877 		 * last called.
1878 		 */
1879 		tx_cleanup_delay = rte_get_timer_cycles() -
1880 			tx_ring->last_cleanup_ticks;
1881 		if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay)
1882 			check_for_tx_completion_in_queue(adapter, tx_ring);
1883 		qid = (qid + 1) % nb_tx_queues;
1884 	}
1885 
1886 	adapter->last_tx_comp_qid = qid;
1887 }
1888 
1889 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1890 				  void *arg)
1891 {
1892 	struct rte_eth_dev *dev = arg;
1893 	struct ena_adapter *adapter = dev->data->dev_private;
1894 
1895 	if (unlikely(adapter->trigger_reset))
1896 		return;
1897 
1898 	check_for_missing_keep_alive(adapter);
1899 	check_for_admin_com_state(adapter);
1900 	check_for_tx_completions(adapter);
1901 
1902 	if (unlikely(adapter->trigger_reset)) {
1903 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
1904 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1905 			NULL);
1906 	}
1907 }
1908 
1909 static inline void
1910 set_default_llq_configurations(struct ena_llq_configurations *llq_config,
1911 			       struct ena_admin_feature_llq_desc *llq,
1912 			       bool use_large_llq_hdr)
1913 {
1914 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
1915 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
1916 	llq_config->llq_num_decs_before_header =
1917 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
1918 
1919 	if (use_large_llq_hdr &&
1920 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
1921 		llq_config->llq_ring_entry_size =
1922 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
1923 		llq_config->llq_ring_entry_size_value = 256;
1924 	} else {
1925 		llq_config->llq_ring_entry_size =
1926 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
1927 		llq_config->llq_ring_entry_size_value = 128;
1928 	}
1929 }
1930 
1931 static int
1932 ena_set_queues_placement_policy(struct ena_adapter *adapter,
1933 				struct ena_com_dev *ena_dev,
1934 				struct ena_admin_feature_llq_desc *llq,
1935 				struct ena_llq_configurations *llq_default_configurations)
1936 {
1937 	int rc;
1938 	u32 llq_feature_mask;
1939 
1940 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
1941 	if (!(ena_dev->supported_features & llq_feature_mask)) {
1942 		PMD_DRV_LOG(INFO,
1943 			"LLQ is not supported. Fallback to host mode policy.\n");
1944 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1945 		return 0;
1946 	}
1947 
1948 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
1949 	if (unlikely(rc)) {
1950 		PMD_INIT_LOG(WARNING,
1951 			"Failed to config dev mode. Fallback to host mode policy.\n");
1952 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1953 		return 0;
1954 	}
1955 
1956 	/* Nothing to config, exit */
1957 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
1958 		return 0;
1959 
1960 	if (!adapter->dev_mem_base) {
1961 		PMD_DRV_LOG(ERR,
1962 			"Unable to access LLQ BAR resource. Fallback to host mode policy.\n");
1963 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1964 		return 0;
1965 	}
1966 
1967 	ena_dev->mem_bar = adapter->dev_mem_base;
1968 
1969 	return 0;
1970 }
1971 
1972 static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
1973 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
1974 {
1975 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
1976 
1977 	/* Regular queues capabilities */
1978 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1979 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1980 			&get_feat_ctx->max_queue_ext.max_queue_ext;
1981 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
1982 				    max_queue_ext->max_rx_cq_num);
1983 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
1984 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
1985 	} else {
1986 		struct ena_admin_queue_feature_desc *max_queues =
1987 			&get_feat_ctx->max_queues;
1988 		io_tx_sq_num = max_queues->max_sq_num;
1989 		io_tx_cq_num = max_queues->max_cq_num;
1990 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
1991 	}
1992 
1993 	/* In case of LLQ use the llq number in the get feature cmd */
1994 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
1995 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
1996 
1997 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
1998 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
1999 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
2000 
2001 	if (unlikely(max_num_io_queues == 0)) {
2002 		PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n");
2003 		return 0;
2004 	}
2005 
2006 	return max_num_io_queues;
2007 }
2008 
2009 static void
2010 ena_set_offloads(struct ena_offloads *offloads,
2011 		 struct ena_admin_feature_offload_desc *offload_desc)
2012 {
2013 	if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
2014 		offloads->tx_offloads |= ENA_IPV4_TSO;
2015 
2016 	/* Tx IPv4 checksum offloads */
2017 	if (offload_desc->tx &
2018 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
2019 		offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
2020 	if (offload_desc->tx &
2021 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
2022 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
2023 	if (offload_desc->tx &
2024 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
2025 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;
2026 
2027 	/* Tx IPv6 checksum offloads */
2028 	if (offload_desc->tx &
2029 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
2030 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
2031 	if (offload_desc->tx &
2032 	     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
2033 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;
2034 
2035 	/* Rx IPv4 checksum offloads */
2036 	if (offload_desc->rx_supported &
2037 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
2038 		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
2039 	if (offload_desc->rx_supported &
2040 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
2041 		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;
2042 
2043 	/* Rx IPv6 checksum offloads */
2044 	if (offload_desc->rx_supported &
2045 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
2046 		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;
2047 
2048 	if (offload_desc->rx_supported &
2049 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
2050 		offloads->rx_offloads |= ENA_RX_RSS_HASH;
2051 }
2052 
2053 static int ena_init_once(void)
2054 {
2055 	static bool init_done;
2056 
2057 	if (init_done)
2058 		return 0;
2059 
2060 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2061 		/* Init timer subsystem for the ENA timer service. */
2062 		rte_timer_subsystem_init();
2063 		/* Register handler for requests from secondary processes. */
2064 		rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle);
2065 	}
2066 
2067 	init_done = true;
2068 	return 0;
2069 }
2070 
2071 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
2072 {
2073 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
2074 	struct rte_pci_device *pci_dev;
2075 	struct rte_intr_handle *intr_handle;
2076 	struct ena_adapter *adapter = eth_dev->data->dev_private;
2077 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
2078 	struct ena_com_dev_get_features_ctx get_feat_ctx;
2079 	struct ena_llq_configurations llq_config;
2080 	const char *queue_type_str;
2081 	uint32_t max_num_io_queues;
2082 	int rc;
2083 	static int adapters_found;
2084 	bool disable_meta_caching;
2085 
2086 	eth_dev->dev_ops = &ena_dev_ops;
2087 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
2088 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
2089 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
2090 
2091 	rc = ena_init_once();
2092 	if (rc != 0)
2093 		return rc;
2094 
2095 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2096 		return 0;
2097 
2098 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2099 
2100 	memset(adapter, 0, sizeof(struct ena_adapter));
2101 	ena_dev = &adapter->ena_dev;
2102 
2103 	adapter->edev_data = eth_dev->data;
2104 
2105 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2106 
2107 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
2108 		     pci_dev->addr.domain,
2109 		     pci_dev->addr.bus,
2110 		     pci_dev->addr.devid,
2111 		     pci_dev->addr.function);
2112 
2113 	intr_handle = pci_dev->intr_handle;
2114 
2115 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
2116 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
2117 
2118 	if (!adapter->regs) {
2119 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
2120 			     ENA_REGS_BAR);
2121 		return -ENXIO;
2122 	}
2123 
2124 	ena_dev->reg_bar = adapter->regs;
2125 	/* Pass device data as a pointer that can be used by the IO functions
2126 	 * of ena_com (for example, for memory allocation).
2127 	 */
2128 	ena_dev->dmadev = eth_dev->data;
2129 
2130 	adapter->id_number = adapters_found;
2131 
2132 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
2133 		 adapter->id_number);
2134 
2135 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
2136 	if (rc != 0) {
2137 		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
2138 		goto err;
2139 	}
2140 
2141 	/* device specific initialization routine */
2142 	rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
2143 	if (rc) {
2144 		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
2145 		goto err;
2146 	}
2147 
2148 	/* Check if device supports LSC */
2149 	if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
2150 		adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
2151 
2152 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
2153 		adapter->use_large_llq_hdr);
2154 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
2155 					     &get_feat_ctx.llq, &llq_config);
2156 	if (unlikely(rc)) {
2157 		PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
2158 		return rc;
2159 	}
2160 
2161 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
2162 		queue_type_str = "Regular";
2163 	else
2164 		queue_type_str = "Low latency";
2165 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
2166 
2167 	calc_queue_ctx.ena_dev = ena_dev;
2168 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
2169 
2170 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
2171 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
2172 		adapter->use_large_llq_hdr);
2173 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
2174 		rc = -EFAULT;
2175 		goto err_device_destroy;
2176 	}
2177 
2178 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
2179 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
2180 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
2181 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
2182 	adapter->max_num_io_queues = max_num_io_queues;
2183 
2184 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2185 		disable_meta_caching =
2186 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
2187 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
2188 	} else {
2189 		disable_meta_caching = false;
2190 	}
2191 
2192 	/* prepare ring structures */
2193 	ena_init_rings(adapter, disable_meta_caching);
2194 
2195 	ena_config_debug_area(adapter);
2196 
2197 	/* Set max MTU for this device */
2198 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
2199 
2200 	ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload);
2201 
2202 	/* Copy MAC address and point DPDK to it */
2203 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
2204 	rte_ether_addr_copy((struct rte_ether_addr *)
2205 			get_feat_ctx.dev_attr.mac_addr,
2206 			(struct rte_ether_addr *)adapter->mac_addr);
2207 
2208 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2209 	if (unlikely(rc != 0)) {
2210 		PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
2211 		goto err_delete_debug_area;
2212 	}
2213 
2214 	adapter->drv_stats = rte_zmalloc("adapter stats",
2215 					 sizeof(*adapter->drv_stats),
2216 					 RTE_CACHE_LINE_SIZE);
2217 	if (!adapter->drv_stats) {
2218 		PMD_DRV_LOG(ERR,
2219 			"Failed to allocate memory for adapter statistics\n");
2220 		rc = -ENOMEM;
2221 		goto err_rss_destroy;
2222 	}
2223 
2224 	rte_spinlock_init(&adapter->admin_lock);
2225 
2226 	rte_intr_callback_register(intr_handle,
2227 				   ena_interrupt_handler_rte,
2228 				   eth_dev);
2229 	rte_intr_enable(intr_handle);
2230 	ena_com_set_admin_polling_mode(ena_dev, false);
2231 	ena_com_admin_aenq_enable(ena_dev);
2232 
2233 	rte_timer_init(&adapter->timer_wd);
2234 
2235 	adapters_found++;
2236 	adapter->state = ENA_ADAPTER_STATE_INIT;
2237 
2238 	return 0;
2239 
2240 err_rss_destroy:
2241 	ena_com_rss_destroy(ena_dev);
2242 err_delete_debug_area:
2243 	ena_com_delete_debug_area(ena_dev);
2244 
2245 err_device_destroy:
2246 	ena_com_delete_host_info(ena_dev);
2247 	ena_com_admin_destroy(ena_dev);
2248 
2249 err:
2250 	return rc;
2251 }
2252 
2253 static void ena_destroy_device(struct rte_eth_dev *eth_dev)
2254 {
2255 	struct ena_adapter *adapter = eth_dev->data->dev_private;
2256 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
2257 
2258 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
2259 		return;
2260 
2261 	ena_com_set_admin_running_state(ena_dev, false);
2262 
2263 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
2264 		ena_close(eth_dev);
2265 
2266 	ena_com_rss_destroy(ena_dev);
2267 
2268 	ena_com_delete_debug_area(ena_dev);
2269 	ena_com_delete_host_info(ena_dev);
2270 
2271 	ena_com_abort_admin_commands(ena_dev);
2272 	ena_com_wait_for_abort_completion(ena_dev);
2273 	ena_com_admin_destroy(ena_dev);
2274 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2275 
2276 	adapter->state = ENA_ADAPTER_STATE_FREE;
2277 }
2278 
2279 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
2280 {
2281 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2282 		return 0;
2283 
2284 	ena_destroy_device(eth_dev);
2285 
2286 	return 0;
2287 }
2288 
2289 static int ena_dev_configure(struct rte_eth_dev *dev)
2290 {
2291 	struct ena_adapter *adapter = dev->data->dev_private;
2292 	int rc;
2293 
2294 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
2295 
2296 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
2297 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2298 	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2299 
2300 	/* Scattered Rx cannot be turned off in the HW, so this capability must
2301 	 * be forced.
2302 	 */
2303 	dev->data->scattered_rx = 1;
2304 
2305 	adapter->last_tx_comp_qid = 0;
2306 
2307 	adapter->missing_tx_completion_budget =
2308 		RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
2309 
2310 	adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
2311 	/* To avoid spurious detection of a Tx completion timeout when the
2312 	 * application does not call the Tx cleanup function, set the Tx queue
2313 	 * stall delay to half of the missing completion timeout as a safety
2314 	 * margin. If there are a lot of missing Tx completions in the
2315 	 * queue, they will be detected sooner or later.
2316 	 */
2317 	adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
2318 
2319 	rc = ena_configure_aenq(adapter);
2320 
2321 	return rc;
2322 }
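
/*
 * Illustrative numbers only (assuming, purely for the sake of example, that
 * ENA_TX_TIMEOUT corresponds to 5 seconds): a Tx packet would then be treated
 * as missing its completion after 5 s, but only on queues which the
 * application cleaned up within the last 2.5 s, so an application that never
 * calls the Tx cleanup path is not flagged by mistake.
 */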
2323 
2324 static void ena_init_rings(struct ena_adapter *adapter,
2325 			   bool disable_meta_caching)
2326 {
2327 	size_t i;
2328 
2329 	for (i = 0; i < adapter->max_num_io_queues; i++) {
2330 		struct ena_ring *ring = &adapter->tx_ring[i];
2331 
2332 		ring->configured = 0;
2333 		ring->type = ENA_RING_TYPE_TX;
2334 		ring->adapter = adapter;
2335 		ring->id = i;
2336 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
2337 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
2338 		ring->sgl_size = adapter->max_tx_sgl_size;
2339 		ring->disable_meta_caching = disable_meta_caching;
2340 	}
2341 
2342 	for (i = 0; i < adapter->max_num_io_queues; i++) {
2343 		struct ena_ring *ring = &adapter->rx_ring[i];
2344 
2345 		ring->configured = 0;
2346 		ring->type = ENA_RING_TYPE_RX;
2347 		ring->adapter = adapter;
2348 		ring->id = i;
2349 		ring->sgl_size = adapter->max_rx_sgl_size;
2350 	}
2351 }
2352 
2353 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
2354 {
2355 	uint64_t port_offloads = 0;
2356 
2357 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
2358 		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
2359 
2360 	if (adapter->offloads.rx_offloads &
2361 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
2362 		port_offloads |=
2363 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
2364 
2365 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
2366 		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2367 
2368 	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
2369 
2370 	return port_offloads;
2371 }
2372 
2373 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
2374 {
2375 	uint64_t port_offloads = 0;
2376 
2377 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
2378 		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
2379 
2380 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
2381 		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
2382 	if (adapter->offloads.tx_offloads &
2383 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
2384 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
2385 		port_offloads |=
2386 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
2387 
2388 	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2389 
2390 	return port_offloads;
2391 }
2392 
2393 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
2394 {
2395 	RTE_SET_USED(adapter);
2396 
2397 	return 0;
2398 }
2399 
2400 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
2401 {
2402 	RTE_SET_USED(adapter);
2403 
2404 	return 0;
2405 }
2406 
2407 static int ena_infos_get(struct rte_eth_dev *dev,
2408 			  struct rte_eth_dev_info *dev_info)
2409 {
2410 	struct ena_adapter *adapter;
2411 	struct ena_com_dev *ena_dev;
2412 
2413 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2414 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2415 	adapter = dev->data->dev_private;
2416 
2417 	ena_dev = &adapter->ena_dev;
2418 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
2419 
2420 	dev_info->speed_capa =
2421 			RTE_ETH_LINK_SPEED_1G   |
2422 			RTE_ETH_LINK_SPEED_2_5G |
2423 			RTE_ETH_LINK_SPEED_5G   |
2424 			RTE_ETH_LINK_SPEED_10G  |
2425 			RTE_ETH_LINK_SPEED_25G  |
2426 			RTE_ETH_LINK_SPEED_40G  |
2427 			RTE_ETH_LINK_SPEED_50G  |
2428 			RTE_ETH_LINK_SPEED_100G;
2429 
2430 	/* Inform framework about available features */
2431 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
2432 	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
2433 	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
2434 	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
2435 
2436 	dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
2437 	dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
2438 
2439 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
2440 	dev_info->max_rx_pktlen  = adapter->max_mtu + RTE_ETHER_HDR_LEN +
2441 		RTE_ETHER_CRC_LEN;
2442 	dev_info->min_mtu = ENA_MIN_MTU;
2443 	dev_info->max_mtu = adapter->max_mtu;
2444 	dev_info->max_mac_addrs = 1;
2445 
2446 	dev_info->max_rx_queues = adapter->max_num_io_queues;
2447 	dev_info->max_tx_queues = adapter->max_num_io_queues;
2448 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
2449 
2450 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
2451 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2452 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2453 					adapter->max_rx_sgl_size);
2454 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2455 					adapter->max_rx_sgl_size);
2456 
2457 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
2458 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2459 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2460 					adapter->max_tx_sgl_size);
2461 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2462 					adapter->max_tx_sgl_size);
2463 
2464 	dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE;
2465 	dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE;
2466 
2467 	return 0;
2468 }
2469 
2470 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
2471 {
2472 	mbuf->data_len = len;
2473 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2474 	mbuf->refcnt = 1;
2475 	mbuf->next = NULL;
2476 }
2477 
2478 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
2479 				    struct ena_com_rx_buf_info *ena_bufs,
2480 				    uint32_t descs,
2481 				    uint16_t *next_to_clean,
2482 				    uint8_t offset)
2483 {
2484 	struct rte_mbuf *mbuf;
2485 	struct rte_mbuf *mbuf_head;
2486 	struct ena_rx_buffer *rx_info;
2487 	int rc;
2488 	uint16_t ntc, len, req_id, buf = 0;
2489 
2490 	if (unlikely(descs == 0))
2491 		return NULL;
2492 
2493 	ntc = *next_to_clean;
2494 
2495 	len = ena_bufs[buf].len;
2496 	req_id = ena_bufs[buf].req_id;
2497 
2498 	rx_info = &rx_ring->rx_buffer_info[req_id];
2499 
2500 	mbuf = rx_info->mbuf;
2501 	RTE_ASSERT(mbuf != NULL);
2502 
2503 	ena_init_rx_mbuf(mbuf, len);
2504 
2505 	/* Fill the mbuf head with the data specific for 1st segment. */
2506 	mbuf_head = mbuf;
2507 	mbuf_head->nb_segs = descs;
2508 	mbuf_head->port = rx_ring->port_id;
2509 	mbuf_head->pkt_len = len;
2510 	mbuf_head->data_off += offset;
2511 
2512 	rx_info->mbuf = NULL;
2513 	rx_ring->empty_rx_reqs[ntc] = req_id;
2514 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
2515 
2516 	while (--descs) {
2517 		++buf;
2518 		len = ena_bufs[buf].len;
2519 		req_id = ena_bufs[buf].req_id;
2520 
2521 		rx_info = &rx_ring->rx_buffer_info[req_id];
2522 		RTE_ASSERT(rx_info->mbuf != NULL);
2523 
2524 		if (unlikely(len == 0)) {
2525 			/*
2526 			 * Some devices can pass a descriptor with length 0.
2527 			 * To avoid confusion, the PMD simply puts the
2528 			 * descriptor back, as it was never used, which also
2529 			 * avoids an extra mbuf allocation.
2530 			 */
2531 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
2532 				rx_info->mbuf, req_id);
2533 			if (unlikely(rc != 0)) {
2534 				/* Free the mbuf in case of an error. */
2535 				rte_mbuf_raw_free(rx_info->mbuf);
2536 			} else {
2537 				/*
2538 				 * If there was no error, just exit the loop as
2539 				 * 0 length descriptor is always the last one.
2540 				 */
2541 				break;
2542 			}
2543 		} else {
2544 			/* Create an mbuf chain. */
2545 			mbuf->next = rx_info->mbuf;
2546 			mbuf = mbuf->next;
2547 
2548 			ena_init_rx_mbuf(mbuf, len);
2549 			mbuf_head->pkt_len += len;
2550 		}
2551 
2552 		/*
2553 		 * Mark the descriptor as depleted and perform necessary
2554 		 * cleanup.
2555 		 * This code will execute in two cases:
2556 		 *  1. Descriptor len was greater than 0 - normal situation.
2557 		 *  2. Descriptor len was 0 and we failed to add the descriptor
2558 		 *     to the device. In that situation, we should try to add
2559 		 *     the mbuf again in the populate routine and mark the
2560 		 *     descriptor as used up by the device.
2561 		 */
2562 		rx_info->mbuf = NULL;
2563 		rx_ring->empty_rx_reqs[ntc] = req_id;
2564 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
2565 	}
2566 
2567 	*next_to_clean = ntc;
2568 
2569 	return mbuf_head;
2570 }
2571 
2572 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
2573 				  uint16_t nb_pkts)
2574 {
2575 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
2576 	unsigned int free_queue_entries;
2577 	uint16_t next_to_clean = rx_ring->next_to_clean;
2578 	uint16_t descs_in_use;
2579 	struct rte_mbuf *mbuf;
2580 	uint16_t completed;
2581 	struct ena_com_rx_ctx ena_rx_ctx;
2582 	int i, rc = 0;
2583 	bool fill_hash;
2584 
2585 #ifdef RTE_ETHDEV_DEBUG_RX
2586 	/* Check adapter state */
2587 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
2588 		PMD_RX_LOG(ALERT,
2589 			"Trying to receive pkts while device is NOT running\n");
2590 		return 0;
2591 	}
2592 #endif
2593 
2594 	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
2595 
2596 	descs_in_use = rx_ring->ring_size -
2597 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
2598 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
2599 
2600 	for (completed = 0; completed < nb_pkts; completed++) {
2601 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
2602 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
2603 		ena_rx_ctx.descs = 0;
2604 		ena_rx_ctx.pkt_offset = 0;
2605 		/* receive packet context */
2606 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
2607 				    rx_ring->ena_com_io_sq,
2608 				    &ena_rx_ctx);
2609 		if (unlikely(rc)) {
2610 			PMD_RX_LOG(ERR,
2611 				"Failed to get the packet from the device, rc: %d\n",
2612 				rc);
2613 			if (rc == ENA_COM_NO_SPACE) {
2614 				++rx_ring->rx_stats.bad_desc_num;
2615 				ena_trigger_reset(rx_ring->adapter,
2616 					ENA_REGS_RESET_TOO_MANY_RX_DESCS);
2617 			} else {
2618 				++rx_ring->rx_stats.bad_req_id;
2619 				ena_trigger_reset(rx_ring->adapter,
2620 					ENA_REGS_RESET_INV_RX_REQ_ID);
2621 			}
2622 			return 0;
2623 		}
2624 
2625 		mbuf = ena_rx_mbuf(rx_ring,
2626 			ena_rx_ctx.ena_bufs,
2627 			ena_rx_ctx.descs,
2628 			&next_to_clean,
2629 			ena_rx_ctx.pkt_offset);
2630 		if (unlikely(mbuf == NULL)) {
2631 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2632 				rx_ring->empty_rx_reqs[next_to_clean] =
2633 					rx_ring->ena_bufs[i].req_id;
2634 				next_to_clean = ENA_IDX_NEXT_MASKED(
2635 					next_to_clean, rx_ring->size_mask);
2636 			}
2637 			break;
2638 		}
2639 
2640 		/* fill mbuf attributes if any */
2641 		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
2642 
2643 		if (unlikely(mbuf->ol_flags &
2644 				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))
2645 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
2646 
2647 		rx_pkts[completed] = mbuf;
2648 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
2649 	}
2650 
2651 	rx_ring->rx_stats.cnt += completed;
2652 	rx_ring->next_to_clean = next_to_clean;
2653 
2654 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
2655 
2656 	/* Burst refill to save doorbells and memory barriers, at a roughly constant interval */
2657 	if (free_queue_entries >= rx_ring->rx_free_thresh) {
2658 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
2659 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2660 	}
2661 
2662 	return completed;
2663 }
2664 
2665 static uint16_t
2666 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2667 		uint16_t nb_pkts)
2668 {
2669 	int32_t ret;
2670 	uint32_t i;
2671 	struct rte_mbuf *m;
2672 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2673 	struct ena_adapter *adapter = tx_ring->adapter;
2674 	struct rte_ipv4_hdr *ip_hdr;
2675 	uint64_t ol_flags;
2676 	uint64_t l4_csum_flag;
2677 	uint64_t dev_offload_capa;
2678 	uint16_t frag_field;
2679 	bool need_pseudo_csum;
2680 
2681 	dev_offload_capa = adapter->offloads.tx_offloads;
2682 	for (i = 0; i != nb_pkts; i++) {
2683 		m = tx_pkts[i];
2684 		ol_flags = m->ol_flags;
2685 
2686 		/* Check if any offload flag was set */
2687 		if (ol_flags == 0)
2688 			continue;
2689 
2690 		l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
2691 		/* SCTP checksum offload is not supported by the ENA. */
2692 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
2693 		    l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
2694 			PMD_TX_LOG(DEBUG,
2695 				"mbuf[%" PRIu32 "] has unsupported offload flags set: 0x%" PRIx64 "\n",
2696 				i, ol_flags);
2697 			rte_errno = ENOTSUP;
2698 			return i;
2699 		}
2700 
2701 		if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
2702 		    !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
2703 		      m->nb_segs == tx_ring->sgl_size &&
2704 		      m->data_len < tx_ring->tx_max_header_size))) {
2705 			PMD_TX_LOG(DEBUG,
2706 				"mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
2707 				i, m->nb_segs);
2708 			rte_errno = EINVAL;
2709 			return i;
2710 		}
2711 
2712 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2713 		/* Check if requested offload is also enabled for the queue */
2714 		if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2715 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
2716 		    (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
2717 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
2718 		    (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
2719 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
2720 			PMD_TX_LOG(DEBUG,
2721 				"mbuf[%" PRIu32 "]: requested offloads 0x%" PRIx64 " are not enabled for the queue[%u]\n",
2722 				i, ol_flags, tx_ring->id);
2723 			rte_errno = EINVAL;
2724 			return i;
2725 		}
2726 
2727 		/* The caller is obligated to set l2 and l3 len if any cksum
2728 		 * offload is enabled.
2729 		 */
2730 		if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
2731 		    (m->l2_len == 0 || m->l3_len == 0))) {
2732 			PMD_TX_LOG(DEBUG,
2733 				"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
2734 				i);
2735 			rte_errno = EINVAL;
2736 			return i;
2737 		}
2738 		ret = rte_validate_tx_offload(m);
2739 		if (ret != 0) {
2740 			rte_errno = -ret;
2741 			return i;
2742 		}
2743 #endif
2744 
2745 		/* Verify HW support for requested offloads and determine if
2746 		 * pseudo header checksum is needed.
2747 		 */
2748 		need_pseudo_csum = false;
2749 		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2750 			if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2751 			    !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
2752 				rte_errno = ENOTSUP;
2753 				return i;
2754 			}
2755 
2756 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
2757 			    !(dev_offload_capa & ENA_IPV4_TSO)) {
2758 				rte_errno = ENOTSUP;
2759 				return i;
2760 			}
2761 
2762 			/* Check HW capabilities and if pseudo csum is needed
2763 			 * for L4 offloads.
2764 			 */
2765 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2766 			    !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
2767 				if (dev_offload_capa &
2768 				    ENA_L4_IPV4_CSUM_PARTIAL) {
2769 					need_pseudo_csum = true;
2770 				} else {
2771 					rte_errno = ENOTSUP;
2772 					return i;
2773 				}
2774 			}
2775 
2776 			/* Parse the DF flag */
2777 			ip_hdr = rte_pktmbuf_mtod_offset(m,
2778 				struct rte_ipv4_hdr *, m->l2_len);
2779 			frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2780 			if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
2781 				m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2782 			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2783 				/* In case we are supposed to TSO and have DF
2784 				 * not set (DF=0), the hardware must be provided
2785 				 * with a partial (pseudo-header) checksum.
2786 				 */
2787 				need_pseudo_csum = true;
2788 			}
2789 		} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2790 			/* There is no support for IPv6 TSO for now. */
2791 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2792 				rte_errno = ENOTSUP;
2793 				return i;
2794 			}
2795 
2796 			/* Check HW capabilities and if pseudo csum is needed */
2797 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2798 			    !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
2799 				if (dev_offload_capa &
2800 				    ENA_L4_IPV6_CSUM_PARTIAL) {
2801 					need_pseudo_csum = true;
2802 				} else {
2803 					rte_errno = ENOTSUP;
2804 					return i;
2805 				}
2806 			}
2807 		}
2808 
2809 		if (need_pseudo_csum) {
2810 			ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
2811 			if (ret != 0) {
2812 				rte_errno = -ret;
2813 				return i;
2814 			}
2815 		}
2816 	}
2817 
2818 	return i;
2819 }
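
/*
 * For reference, a hedged sketch of how an application is expected to set up
 * an mbuf before handing it to the Tx prepare/burst path above when it wants
 * Tx checksum offload (standard ethdev/mbuf API; the values are illustrative):
 *
 *	struct rte_mbuf *m = ...;                  // holds an IPv4/TCP frame
 *	m->l2_len = RTE_ETHER_HDR_LEN;             // 14B Ethernet header
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);   // 20B IPv4 header, no options
 *	m->ol_flags = RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		      RTE_MBUF_F_TX_TCP_CKSUM;
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, &m, 1);
 *	if (n == 1)
 *		rte_eth_tx_burst(port_id, queue_id, &m, 1);
 *
 * rte_eth_tx_prepare() ends up in eth_ena_prep_pkts(), which validates the
 * flags against the device capabilities and computes the pseudo-header
 * checksum when only partial L4 checksum offload is supported.
 */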
2820 
2821 static void ena_update_hints(struct ena_adapter *adapter,
2822 			     struct ena_admin_ena_hw_hints *hints)
2823 {
2824 	if (hints->admin_completion_tx_timeout)
2825 		adapter->ena_dev.admin_queue.completion_timeout =
2826 			hints->admin_completion_tx_timeout * 1000;
2827 
2828 	if (hints->mmio_read_timeout)
2829 		/* convert to usec */
2830 		adapter->ena_dev.mmio_read.reg_read_to =
2831 			hints->mmio_read_timeout * 1000;
2832 
2833 	if (hints->missing_tx_completion_timeout) {
2834 		if (hints->missing_tx_completion_timeout ==
2835 		    ENA_HW_HINTS_NO_TIMEOUT) {
2836 			adapter->missing_tx_completion_to =
2837 				ENA_HW_HINTS_NO_TIMEOUT;
2838 		} else {
2839 			/* Convert from msecs to ticks */
2840 			adapter->missing_tx_completion_to = rte_get_timer_hz() *
2841 				hints->missing_tx_completion_timeout / 1000;
2842 			adapter->tx_cleanup_stall_delay =
2843 				adapter->missing_tx_completion_to / 2;
2844 		}
2845 	}
2846 
2847 	if (hints->driver_watchdog_timeout) {
2848 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2849 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2850 		else
2851 			/* Convert msecs to ticks */
2852 			adapter->keep_alive_timeout =
2853 				(hints->driver_watchdog_timeout *
2854 				rte_get_timer_hz()) / 1000;
2855 	}
2856 }
2857 
2858 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
2859 	struct ena_tx_buffer *tx_info,
2860 	struct rte_mbuf *mbuf,
2861 	void **push_header,
2862 	uint16_t *header_len)
2863 {
2864 	struct ena_com_buf *ena_buf;
2865 	uint16_t delta, seg_len, push_len;
2866 
2867 	delta = 0;
2868 	seg_len = mbuf->data_len;
2869 
2870 	tx_info->mbuf = mbuf;
2871 	ena_buf = tx_info->bufs;
2872 
2873 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2874 		/*
2875 		 * Tx header might be (and in most cases will be) smaller than
2876 		 * tx_max_header_size. It is not an issue to send more data to
2877 		 * the device than actually needed when the mbuf is larger
2878 		 * than tx_max_header_size.
2879 		 */
2880 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
2881 		*header_len = push_len;
2882 
2883 		if (likely(push_len <= seg_len)) {
2884 			/* If the push header fits in a single segment, then
2885 			 * just point it to the 1st mbuf data.
2886 			 */
2887 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
2888 		} else {
2889 			/* If the push header spans several segments, copy
2890 			 * it to the intermediate buffer.
2891 			 */
2892 			rte_pktmbuf_read(mbuf, 0, push_len,
2893 				tx_ring->push_buf_intermediate_buf);
2894 			*push_header = tx_ring->push_buf_intermediate_buf;
2895 			delta = push_len - seg_len;
2896 		}
2897 	} else {
2898 		*push_header = NULL;
2899 		*header_len = 0;
2900 		push_len = 0;
2901 	}
2902 
2903 	/* Process first segment taking into consideration pushed header */
2904 	if (seg_len > push_len) {
2905 		ena_buf->paddr = mbuf->buf_iova +
2906 				mbuf->data_off +
2907 				push_len;
2908 		ena_buf->len = seg_len - push_len;
2909 		ena_buf++;
2910 		tx_info->num_of_bufs++;
2911 	}
2912 
2913 	while ((mbuf = mbuf->next) != NULL) {
2914 		seg_len = mbuf->data_len;
2915 
2916 		/* Skip mbufs if whole data is pushed as a header */
2917 		if (unlikely(delta > seg_len)) {
2918 			delta -= seg_len;
2919 			continue;
2920 		}
2921 
2922 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
2923 		ena_buf->len = seg_len - delta;
2924 		ena_buf++;
2925 		tx_info->num_of_bufs++;
2926 
2927 		delta = 0;
2928 	}
2929 }
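
/*
 * Worked example for the LLQ push-header mapping above (the numbers are
 * purely illustrative): assume tx_max_header_size is 96 and the packet
 * consists of two segments of 60 and 1400 bytes. Then:
 *
 *	push_len = RTE_MIN(1460, 96) = 96;  // copied to the intermediate buffer
 *	delta    = 96 - 60 = 36;            // bytes of the 2nd segment already pushed
 *
 * The first segment is consumed entirely by the push header, so no ena_buf
 * entry is created for it; the second segment is described starting at offset
 * delta, with a length of 1400 - 36 = 1364 bytes.
 */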
2930 
2931 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
2932 {
2933 	struct ena_tx_buffer *tx_info;
2934 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
2935 	uint16_t next_to_use;
2936 	uint16_t header_len;
2937 	uint16_t req_id;
2938 	void *push_header;
2939 	int nb_hw_desc;
2940 	int rc;
2941 
2942 	/* Check for space for 2 additional descriptors due to a possible
2943 	 * header split and the metadata descriptor.
2944 	 */
2945 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2946 					  mbuf->nb_segs + 2)) {
2947 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
2948 		return ENA_COM_NO_MEM;
2949 	}
2950 
2951 	next_to_use = tx_ring->next_to_use;
2952 
2953 	req_id = tx_ring->empty_tx_reqs[next_to_use];
2954 	tx_info = &tx_ring->tx_buffer_info[req_id];
2955 	tx_info->num_of_bufs = 0;
2956 	RTE_ASSERT(tx_info->mbuf == NULL);
2957 
2958 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
2959 
2960 	ena_tx_ctx.ena_bufs = tx_info->bufs;
2961 	ena_tx_ctx.push_header = push_header;
2962 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2963 	ena_tx_ctx.req_id = req_id;
2964 	ena_tx_ctx.header_len = header_len;
2965 
2966 	/* Set Tx offloads flags, if applicable */
2967 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
2968 		tx_ring->disable_meta_caching);
2969 
2970 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
2971 			&ena_tx_ctx))) {
2972 		PMD_TX_LOG(DEBUG,
2973 			"LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
2974 			tx_ring->id);
2975 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2976 		tx_ring->tx_stats.doorbells++;
2977 		tx_ring->pkts_without_db = false;
2978 	}
2979 
2980 	/* prepare the packet's descriptors to dma engine */
2981 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,	&ena_tx_ctx,
2982 		&nb_hw_desc);
2983 	if (unlikely(rc)) {
2984 		PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
2985 		++tx_ring->tx_stats.prepare_ctx_err;
2986 		ena_trigger_reset(tx_ring->adapter,
2987 			ENA_REGS_RESET_DRIVER_INVALID_STATE);
2988 		return rc;
2989 	}
2990 
2991 	tx_info->tx_descs = nb_hw_desc;
2992 	tx_info->timestamp = rte_get_timer_cycles();
2993 
2994 	tx_ring->tx_stats.cnt++;
2995 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
2996 
2997 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
2998 		tx_ring->size_mask);
2999 
3000 	return 0;
3001 }
3002 
3003 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
3004 {
3005 	struct ena_ring *tx_ring = (struct ena_ring *)txp;
3006 	unsigned int total_tx_descs = 0;
3007 	unsigned int total_tx_pkts = 0;
3008 	uint16_t cleanup_budget;
3009 	uint16_t next_to_clean = tx_ring->next_to_clean;
3010 
3011 	/*
3012 	 * If free_pkt_cnt is equal to 0, it means that the user requested
3013 	 * full cleanup, so attempt to release all Tx descriptors
3014 	 * (ring_size - 1 -> size_mask)
3015 	 */
3016 	cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt;
3017 
3018 	while (likely(total_tx_pkts < cleanup_budget)) {
3019 		struct rte_mbuf *mbuf;
3020 		struct ena_tx_buffer *tx_info;
3021 		uint16_t req_id;
3022 
3023 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
3024 			break;
3025 
3026 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
3027 			break;
3028 
3029 		/* Get Tx info & store how many descs were processed  */
3030 		tx_info = &tx_ring->tx_buffer_info[req_id];
3031 		tx_info->timestamp = 0;
3032 
3033 		mbuf = tx_info->mbuf;
3034 		rte_pktmbuf_free(mbuf);
3035 
3036 		tx_info->mbuf = NULL;
3037 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
3038 
3039 		total_tx_descs += tx_info->tx_descs;
3040 		total_tx_pkts++;
3041 
3042 		/* Put back descriptor to the ring for reuse */
3043 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
3044 			tx_ring->size_mask);
3045 	}
3046 
3047 	if (likely(total_tx_descs > 0)) {
3048 		/* acknowledge completion of sent packets */
3049 		tx_ring->next_to_clean = next_to_clean;
3050 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
3051 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
3052 	}
3053 
3054 	/* Notify completion handler that full cleanup was performed */
3055 	if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget)
3056 		tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
3057 
3058 	return total_tx_pkts;
3059 }
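
/*
 * Besides the internal call from the Tx burst path below, this routine is the
 * natural backend for rte_eth_tx_done_cleanup() (assuming it is registered as
 * the tx_done_cleanup dev op elsewhere in this file). A usage sketch from the
 * application side:
 *
 *	// Reclaim up to 64 transmitted mbufs on port 0, queue 0.
 *	int cleaned = rte_eth_tx_done_cleanup(0, 0, 64);
 *	// Passing 0 as the limit requests a full cleanup of the queue.
 *	if (cleaned < 0)
 *		handle_error(cleaned);   // hypothetical error handler
 *
 * The last_cleanup_ticks timestamp updated here is what
 * check_for_tx_completions() uses to decide whether the queue was cleaned up
 * recently enough for the missing-completion check to be meaningful.
 */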
3060 
3061 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
3062 				  uint16_t nb_pkts)
3063 {
3064 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
3065 	int available_desc;
3066 	uint16_t sent_idx = 0;
3067 
3068 #ifdef RTE_ETHDEV_DEBUG_TX
3069 	/* Check adapter state */
3070 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
3071 		PMD_TX_LOG(ALERT,
3072 			"Trying to xmit pkts while device is NOT running\n");
3073 		return 0;
3074 	}
3075 #endif
3076 
3077 	available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
3078 	if (available_desc < tx_ring->tx_free_thresh)
3079 		ena_tx_cleanup((void *)tx_ring, 0);
3080 
3081 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
3082 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
3083 			break;
3084 		tx_ring->pkts_without_db = true;
3085 		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
3086 			tx_ring->size_mask)]);
3087 	}
3088 
3089 	/* If there are ready packets to be xmitted... */
3090 	if (likely(tx_ring->pkts_without_db)) {
3091 		/* ...let HW do its best :-) */
3092 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
3093 		tx_ring->tx_stats.doorbells++;
3094 		tx_ring->pkts_without_db = false;
3095 	}
3096 
3097 	tx_ring->tx_stats.available_desc =
3098 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
3099 	tx_ring->tx_stats.tx_poll++;
3100 
3101 	return sent_idx;
3102 }
3103 
3104 int ena_copy_eni_stats(struct ena_adapter *adapter, struct ena_stats_eni *stats)
3105 {
3106 	int rc;
3107 
3108 	rte_spinlock_lock(&adapter->admin_lock);
3109 	/* Retrieve and store the latest statistics from the AQ. This ensures
3110 	 * that previous value is returned in case of a com error.
3111 	 */
3112 	rc = ENA_PROXY(adapter, ena_com_get_eni_stats, &adapter->ena_dev,
3113 		(struct ena_admin_eni_stats *)stats);
3114 	rte_spinlock_unlock(&adapter->admin_lock);
3115 	if (rc != 0) {
3116 		if (rc == ENA_COM_UNSUPPORTED) {
3117 			PMD_DRV_LOG(DEBUG,
3118 				"Retrieving ENI metrics is not supported\n");
3119 		} else {
3120 			PMD_DRV_LOG(WARNING,
3121 				"Failed to get ENI metrics, rc: %d\n", rc);
3122 		}
3123 		return rc;
3124 	}
3125 
3126 	return 0;
3127 }
3128 
3129 /**
3130  * DPDK callback to retrieve names of extended device statistics
3131  *
3132  * @param dev
3133  *   Pointer to Ethernet device structure.
3134  * @param[out] xstats_names
3135  *   Buffer to insert names into.
3136  * @param n
3137  *   Number of names.
3138  *
3139  * @return
3140  *   Number of xstats names.
3141  */
3142 static int ena_xstats_get_names(struct rte_eth_dev *dev,
3143 				struct rte_eth_xstat_name *xstats_names,
3144 				unsigned int n)
3145 {
3146 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
3147 	unsigned int stat, i, count = 0;
3148 
3149 	if (n < xstats_count || !xstats_names)
3150 		return xstats_count;
3151 
3152 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
3153 		strcpy(xstats_names[count].name,
3154 			ena_stats_global_strings[stat].name);
3155 
3156 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++)
3157 		strcpy(xstats_names[count].name,
3158 			ena_stats_eni_strings[stat].name);
3159 
3160 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
3161 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
3162 			snprintf(xstats_names[count].name,
3163 				sizeof(xstats_names[count].name),
3164 				"rx_q%d_%s", i,
3165 				ena_stats_rx_strings[stat].name);
3166 
3167 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
3168 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
3169 			snprintf(xstats_names[count].name,
3170 				sizeof(xstats_names[count].name),
3171 				"tx_q%d_%s", i,
3172 				ena_stats_tx_strings[stat].name);
3173 
3174 	return xstats_count;
3175 }
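
/*
 * The xstats id space produced above is laid out as: global stats first, then
 * ENI stats, then per-queue Rx stats grouped by stat (all queues of stat 0,
 * then all queues of stat 1, ...), then per-queue Tx stats in the same order.
 * A small worked example, assuming a device configured with 2 Rx queues:
 *
 *	id 0..3  -> wd_expired, dev_start, dev_stop, tx_drops
 *	id 4..8  -> the five ENI *_allowance_exceeded counters
 *	id 9     -> rx_q0_cnt,   id 10 -> rx_q1_cnt
 *	id 11    -> rx_q0_bytes, id 12 -> rx_q1_bytes, and so on
 *
 * The by-id lookups below are expected to follow the same ordering.
 */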
3176 
3177 /**
3178  * DPDK callback to retrieve names of extended device statistics for the given
3179  * ids.
3180  *
3181  * @param dev
3182  *   Pointer to Ethernet device structure.
3183  * @param[out] xstats_names
3184  *   Buffer to insert names into.
3185  * @param ids
3186  *   IDs array for which the names should be retrieved.
3187  * @param size
3188  *   Number of ids.
3189  *
3190  * @return
3191  *   Positive value: number of xstats names. Negative value: error code.
3192  */
3193 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
3194 				      const uint64_t *ids,
3195 				      struct rte_eth_xstat_name *xstats_names,
3196 				      unsigned int size)
3197 {
3198 	uint64_t xstats_count = ena_xstats_calc_num(dev->data);
3199 	uint64_t id, qid;
3200 	unsigned int i;
3201 
3202 	if (xstats_names == NULL)
3203 		return xstats_count;
3204 
3205 	for (i = 0; i < size; ++i) {
3206 		id = ids[i];
3207 		if (id >= xstats_count) {
3208 			PMD_DRV_LOG(ERR,
3209 				"ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
3210 				 id, xstats_count);
3211 			return -EINVAL;
3212 		}
3213 
3214 		if (id < ENA_STATS_ARRAY_GLOBAL) {
3215 			strcpy(xstats_names[i].name,
3216 			       ena_stats_global_strings[id].name);
3217 			continue;
3218 		}
3219 
3220 		id -= ENA_STATS_ARRAY_GLOBAL;
3221 		if (id < ENA_STATS_ARRAY_ENI) {
3222 			strcpy(xstats_names[i].name,
3223 			       ena_stats_eni_strings[id].name);
3224 			continue;
3225 		}
3226 
3227 		id -= ENA_STATS_ARRAY_ENI;
3228 		if (id < ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues) {
3229 			qid = id % dev->data->nb_rx_queues;
3230 			id /= dev->data->nb_rx_queues;
3231 			snprintf(xstats_names[i].name,
3232 				 sizeof(xstats_names[i].name),
3233 				 "rx_q%" PRIu64 "_%s",
3234 				 qid, ena_stats_rx_strings[id].name);
3235 			continue;
3236 		}
3237 
3238 		id -= ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
3239 		/* Although this condition is not strictly needed, it is kept
3240 		 * for compatibility in case a new xstats group is ever added.
3241 		 */
3242 		if (id < ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues) {
3243 			qid = id % dev->data->nb_tx_queues;
3244 			id /= dev->data->nb_tx_queues;
3245 			snprintf(xstats_names[i].name,
3246 				 sizeof(xstats_names[i].name),
3247 				 "tx_q%" PRIu64 "_%s",
3248 				 qid, ena_stats_tx_strings[id].name);
3249 			continue;
3250 		}
3251 	}
3252 
3253 	return i;
3254 }
3255 
3256 /**
3257  * DPDK callback to get extended device statistics.
3258  *
3259  * @param dev
3260  *   Pointer to Ethernet device structure.
3261  * @param[out] stats
3262  *   Stats table output buffer.
3263  * @param n
3264  *   The size of the stats table.
3265  *
3266  * @return
3267  *   Number of xstats on success, negative on failure.
3268  */
3269 static int ena_xstats_get(struct rte_eth_dev *dev,
3270 			  struct rte_eth_xstat *xstats,
3271 			  unsigned int n)
3272 {
3273 	struct ena_adapter *adapter = dev->data->dev_private;
3274 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
3275 	struct ena_stats_eni eni_stats;
3276 	unsigned int stat, i, count = 0;
3277 	int stat_offset;
3278 	void *stats_begin;
3279 
3280 	if (n < xstats_count)
3281 		return xstats_count;
3282 
3283 	if (!xstats)
3284 		return 0;
3285 
3286 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
3287 		stat_offset = ena_stats_global_strings[stat].stat_offset;
3288 		stats_begin = &adapter->dev_stats;
3289 
3290 		xstats[count].id = count;
3291 		xstats[count].value = *((uint64_t *)
3292 			((char *)stats_begin + stat_offset));
3293 	}
3294 
3295 	/* Even if the function below fails, copy the previous (or initial)
3296 	 * values to keep the rte_eth_xstat structure consistent.
3297 	 */
3298 	ena_copy_eni_stats(adapter, &eni_stats);
3299 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
3300 		stat_offset = ena_stats_eni_strings[stat].stat_offset;
3301 		stats_begin = &eni_stats;
3302 
3303 		xstats[count].id = count;
3304 		xstats[count].value = *((uint64_t *)
3305 		    ((char *)stats_begin + stat_offset));
3306 	}
3307 
3308 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
3309 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
3310 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
3311 			stats_begin = &adapter->rx_ring[i].rx_stats;
3312 
3313 			xstats[count].id = count;
3314 			xstats[count].value = *((uint64_t *)
3315 				((char *)stats_begin + stat_offset));
3316 		}
3317 	}
3318 
3319 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
3320 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
3321 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
3322 			stats_begin = &adapter->tx_ring[i].tx_stats;
3323 
3324 			xstats[count].id = count;
3325 			xstats[count].value = *((uint64_t *)
3326 				((char *)stats_begin + stat_offset));
3327 		}
3328 	}
3329 
3330 	return count;
3331 }
3332 
3333 static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
3334 				const uint64_t *ids,
3335 				uint64_t *values,
3336 				unsigned int n)
3337 {
3338 	struct ena_adapter *adapter = dev->data->dev_private;
3339 	struct ena_stats_eni eni_stats;
3340 	uint64_t id;
3341 	uint64_t rx_entries, tx_entries;
3342 	unsigned int i;
3343 	int qid;
3344 	int valid = 0;
3345 	bool was_eni_copied = false;
3346 
3347 	for (i = 0; i < n; ++i) {
3348 		id = ids[i];
3349 		/* Check if id belongs to global statistics */
3350 		if (id < ENA_STATS_ARRAY_GLOBAL) {
3351 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
3352 			++valid;
3353 			continue;
3354 		}
3355 
3356 		/* Check if id belongs to ENI statistics */
3357 		id -= ENA_STATS_ARRAY_GLOBAL;
3358 		if (id < ENA_STATS_ARRAY_ENI) {
3359 			/* Avoid reading ENI stats multiple times in a single
3360 			 * function call, as it requires communication with the
3361 			 * admin queue.
3362 			 */
3363 			if (!was_eni_copied) {
3364 				was_eni_copied = true;
3365 				ena_copy_eni_stats(adapter, &eni_stats);
3366 			}
3367 			values[i] = *((uint64_t *)&eni_stats + id);
3368 			++valid;
3369 			continue;
3370 		}
3371 
3372 		/* Check if id belongs to rx queue statistics */
3373 		id -= ENA_STATS_ARRAY_ENI;
3374 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
3375 		if (id < rx_entries) {
3376 			qid = id % dev->data->nb_rx_queues;
3377 			id /= dev->data->nb_rx_queues;
3378 			values[i] = *((uint64_t *)
3379 				&adapter->rx_ring[qid].rx_stats + id);
3380 			++valid;
3381 			continue;
3382 		}
3383 		/* Check if id belongs to tx queue statistics */
3384 		id -= rx_entries;
3385 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
3386 		if (id < tx_entries) {
3387 			qid = id % dev->data->nb_tx_queues;
3388 			id /= dev->data->nb_tx_queues;
3389 			values[i] = *((uint64_t *)
3390 				&adapter->tx_ring[qid].tx_stats + id);
3391 			++valid;
3392 			continue;
3393 		}
3394 	}
3395 
3396 	return valid;
3397 }
3398 
3399 static int ena_process_bool_devarg(const char *key,
3400 				   const char *value,
3401 				   void *opaque)
3402 {
3403 	struct ena_adapter *adapter = opaque;
3404 	bool bool_value;
3405 
3406 	/* Parse the value. */
3407 	if (strcmp(value, "1") == 0) {
3408 		bool_value = true;
3409 	} else if (strcmp(value, "0") == 0) {
3410 		bool_value = false;
3411 	} else {
3412 		PMD_INIT_LOG(ERR,
3413 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
3414 			value, key);
3415 		return -EINVAL;
3416 	}
3417 
3418 	/* Now, assign it to the proper adapter field. */
3419 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
3420 		adapter->use_large_llq_hdr = bool_value;
3421 
3422 	return 0;
3423 }
3424 
3425 static int ena_parse_devargs(struct ena_adapter *adapter,
3426 			     struct rte_devargs *devargs)
3427 {
3428 	static const char * const allowed_args[] = {
3429 		ENA_DEVARG_LARGE_LLQ_HDR,
3430 		NULL,
3431 	};
3432 	struct rte_kvargs *kvlist;
3433 	int rc;
3434 
3435 	if (devargs == NULL)
3436 		return 0;
3437 
3438 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
3439 	if (kvlist == NULL) {
3440 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
3441 			devargs->args);
3442 		return -EINVAL;
3443 	}
3444 
3445 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
3446 		ena_process_bool_devarg, adapter);
3447 
3448 	rte_kvargs_free(kvlist);
3449 
3450 	return rc;
3451 }
3452 
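/* Configure per-queue Rx interrupts (one event fd per Rx queue) when the
 * application requested them via dev_conf.intr_conf.rxq. On failure the
 * partially configured state is rolled back and the admin interrupt is
 * re-enabled before returning the error.
 */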
3453 static int ena_setup_rx_intr(struct rte_eth_dev *dev)
3454 {
3455 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3456 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3457 	int rc;
3458 	uint16_t vectors_nb, i;
3459 	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
3460 
3461 	if (!rx_intr_requested)
3462 		return 0;
3463 
3464 	if (!rte_intr_cap_multiple(intr_handle)) {
3465 		PMD_DRV_LOG(ERR,
3466 			"Rx interrupt requested, but it isn't supported by the PCI driver\n");
3467 		return -ENOTSUP;
3468 	}
3469 
3470 	/* Disable interrupt mapping before the configuration starts. */
3471 	rte_intr_disable(intr_handle);
3472 
3473 	/* Verify if there are enough vectors available. */
3474 	vectors_nb = dev->data->nb_rx_queues;
3475 	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
3476 		PMD_DRV_LOG(ERR,
3477 			"Too many Rx interrupts requested, maximum number: %d\n",
3478 			RTE_MAX_RXTX_INTR_VEC_ID);
3479 		rc = -ENOTSUP;
3480 		goto enable_intr;
3481 	}
3482 
3483 	/* Allocate the vector list */
3484 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
3485 					   dev->data->nb_rx_queues)) {
3486 		PMD_DRV_LOG(ERR,
3487 			"Failed to allocate interrupt vector for %d queues\n",
3488 			dev->data->nb_rx_queues);
3489 		rc = -ENOMEM;
3490 		goto enable_intr;
3491 	}
3492 
3493 	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
3494 	if (rc != 0)
3495 		goto free_intr_vec;
3496 
3497 	if (!rte_intr_allow_others(intr_handle)) {
3498 		PMD_DRV_LOG(ERR,
3499 			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
		rc = -ENOTSUP;
3500 		goto disable_intr_efd;
3501 	}
3502 
3503 	for (i = 0; i < vectors_nb; ++i) {
3504 		rc = rte_intr_vec_list_index_set(intr_handle, i,
3505 					   RTE_INTR_VEC_RXTX_OFFSET + i);
		if (rc != 0)
3506 			goto disable_intr_efd;
	}
3507 
3508 	rte_intr_enable(intr_handle);
3509 	return 0;
3510 
3511 disable_intr_efd:
3512 	rte_intr_efd_disable(intr_handle);
3513 free_intr_vec:
3514 	rte_intr_vec_list_free(intr_handle);
3515 enable_intr:
3516 	rte_intr_enable(intr_handle);
3517 	return rc;
3518 }
3519 
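/* Program the interrupt register of the Rx queue's IO completion queue.
 * "unmask" set to true re-arms (unmasks) the interrupt; the enable/disable
 * ethdev callbacks below are thin wrappers around this helper.
 */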
3520 static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
3521 				 uint16_t queue_id,
3522 				 bool unmask)
3523 {
3524 	struct ena_adapter *adapter = dev->data->dev_private;
3525 	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
3526 	struct ena_eth_io_intr_reg intr_reg;
3527 
3528 	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask);
3529 	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
3530 }
3531 
3532 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
3533 				    uint16_t queue_id)
3534 {
3535 	ena_rx_queue_intr_set(dev, queue_id, true);
3536 
3537 	return 0;
3538 }
3539 
3540 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
3541 				     uint16_t queue_id)
3542 {
3543 	ena_rx_queue_intr_set(dev, queue_id, false);
3544 
3545 	return 0;
3546 }
3547 
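/* Select which AENQ groups get enabled on the device, honoring the
 * application's link-state-change (LSC) interrupt configuration.
 */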
3548 static int ena_configure_aenq(struct ena_adapter *adapter)
3549 {
3550 	uint32_t aenq_groups = adapter->all_aenq_groups;
3551 	int rc;
3552 
3553 	/* all_aenq_groups holds all AENQ functions supported by both the driver
3554 	 * and the device, so first make sure the LSC request can be satisfied.
3555 	 */
3556 	if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
3557 		if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
3558 			PMD_DRV_LOG(ERR,
3559 				"LSC requested, but it's not supported by the AENQ\n");
3560 			return -EINVAL;
3561 		}
3562 	} else {
3563 		/* If LSC wasn't enabled by the app, let's enable all supported
3564 		 * AENQ procedures except the LSC.
3565 		 */
3566 		aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
3567 	}
3568 
3569 	rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
3570 	if (rc != 0) {
3571 		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
3572 		return rc;
3573 	}
3574 
3575 	adapter->active_aenq_groups = aenq_groups;
3576 
3577 	return 0;
3578 }
3579 
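/* RSS indirection table accessors. ENA_PROXY() either invokes the ena_com
 * call directly (primary process) or forwards the request to the primary
 * over the multi-process IPC channel handled by ena_mp_primary_handle()
 * at the end of this file.
 */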
3580 int ena_mp_indirect_table_set(struct ena_adapter *adapter)
3581 {
3582 	return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev);
3583 }
3584 
3585 int ena_mp_indirect_table_get(struct ena_adapter *adapter,
3586 			      uint32_t *indirect_table)
3587 {
3588 	return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev,
3589 		indirect_table);
3590 }
3591 
3592 /*********************************************************************
3593  *  ena_plat_dpdk.h functions implementations
3594  *********************************************************************/
3595 
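/* Allocate a zeroed, IOVA-contiguous memzone on behalf of ena_com. Every
 * allocation gets a unique name built from the port id and a per-adapter
 * counter, as each rte_memzone name must be unique.
 */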
3596 const struct rte_memzone *
3597 ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
3598 		       int socket_id, unsigned int alignment, void **virt_addr,
3599 		       dma_addr_t *phys_addr)
3600 {
3601 	char z_name[RTE_MEMZONE_NAMESIZE];
3602 	struct ena_adapter *adapter = data->dev_private;
3603 	const struct rte_memzone *memzone;
3604 	int rc;
3605 
3606 	rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "",
3607 		data->port_id, adapter->memzone_cnt);
3608 	if (rc >= RTE_MEMZONE_NAMESIZE) {
3609 		PMD_DRV_LOG(ERR,
3610 			"Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n",
3611 			data->port_id, adapter->memzone_cnt);
3612 		goto error;
3613 	}
3614 	adapter->memzone_cnt++;
3615 
3616 	memzone = rte_memzone_reserve_aligned(z_name, size, socket_id,
3617 		RTE_MEMZONE_IOVA_CONTIG, alignment);
3618 	if (memzone == NULL) {
3619 		PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n",
3620 			z_name);
3621 		goto error;
3622 	}
3623 
3624 	memset(memzone->addr, 0, size);
3625 	*virt_addr = memzone->addr;
3626 	*phys_addr = memzone->iova;
3627 
3628 	return memzone;
3629 
3630 error:
3631 	*virt_addr = NULL;
3632 	*phys_addr = 0;
3633 
3634 	return NULL;
3635 }
3636 
3638 /*********************************************************************
3639  *  PMD configuration
3640  *********************************************************************/
3641 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3642 	struct rte_pci_device *pci_dev)
3643 {
3644 	return rte_eth_dev_pci_generic_probe(pci_dev,
3645 		sizeof(struct ena_adapter), eth_ena_dev_init);
3646 }
3647 
3648 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
3649 {
3650 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
3651 }
3652 
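/* PCI driver glue. RTE_PCI_DRV_WC_ACTIVATE requests write-combined mapping
 * of the device BARs (used by the LLQ push memory) and RTE_PCI_DRV_INTR_LSC
 * advertises link-state-change interrupt support.
 */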
3653 static struct rte_pci_driver rte_ena_pmd = {
3654 	.id_table = pci_id_ena_map,
3655 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3656 		     RTE_PCI_DRV_WC_ACTIVATE,
3657 	.probe = eth_ena_pci_probe,
3658 	.remove = eth_ena_pci_remove,
3659 };
3660 
3661 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
3662 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
3663 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
3664 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");
3665 RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
3666 RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
3667 #ifdef RTE_ETHDEV_DEBUG_RX
3668 RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG);
3669 #endif
3670 #ifdef RTE_ETHDEV_DEBUG_TX
3671 RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG);
3672 #endif
3673 RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING);
3674 
3675 /******************************************************************************
3676  ******************************** AENQ Handlers *******************************
3677  *****************************************************************************/
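/* AENQ link-change handler: cache the reported link status and let the
 * ethdev layer notify the application through the LSC callback.
 */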
3678 static void ena_update_on_link_change(void *adapter_data,
3679 				      struct ena_admin_aenq_entry *aenq_e)
3680 {
3681 	struct rte_eth_dev *eth_dev = adapter_data;
3682 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3683 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
3684 	uint32_t status;
3685 
3686 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3687 
3688 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
3689 	adapter->link_status = status;
3690 
3691 	ena_link_update(eth_dev, 0);
3692 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
3693 }
3694 
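/* AENQ notification handler: only ENA_ADMIN_UPDATE_HINTS is recognized;
 * the HW hints payload is forwarded to ena_update_hints().
 */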
3695 static void ena_notification(void *adapter_data,
3696 			     struct ena_admin_aenq_entry *aenq_e)
3697 {
3698 	struct rte_eth_dev *eth_dev = adapter_data;
3699 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3700 	struct ena_admin_ena_hw_hints *hints;
3701 
3702 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
3703 		PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
3704 			aenq_e->aenq_common_desc.group,
3705 			ENA_ADMIN_NOTIFICATION);
3706 
3707 	switch (aenq_e->aenq_common_desc.syndrome) {
3708 	case ENA_ADMIN_UPDATE_HINTS:
3709 		hints = (struct ena_admin_ena_hw_hints *)
3710 			(&aenq_e->inline_data_w4);
3711 		ena_update_hints(adapter, hints);
3712 		break;
3713 	default:
3714 		PMD_DRV_LOG(ERR, "Unknown AENQ notification syndrome: %d\n",
3715 			aenq_e->aenq_common_desc.syndrome);
3716 	}
3717 }
3718 
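/* AENQ keep-alive handler: refresh the watchdog timestamp and latch the
 * drop counters, which the device reports split into 32-bit high/low
 * halves inside the descriptor.
 */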
3719 static void ena_keep_alive(void *adapter_data,
3720 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
3721 {
3722 	struct rte_eth_dev *eth_dev = adapter_data;
3723 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3724 	struct ena_admin_aenq_keep_alive_desc *desc;
3725 	uint64_t rx_drops;
3726 	uint64_t tx_drops;
3727 
3728 	adapter->timestamp_wd = rte_get_timer_cycles();
3729 
3730 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3731 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3732 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3733 
3734 	adapter->drv_stats->rx_drops = rx_drops;
3735 	adapter->dev_stats.tx_drops = tx_drops;
3736 }
3737 
3738 /**
3739  * This handler will be called for an unknown event group or an unimplemented handler
3740  **/
3741 static void unimplemented_aenq_handler(__rte_unused void *data,
3742 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
3743 {
3744 	PMD_DRV_LOG(ERR,
3745 		"Unknown event was received or event with unimplemented handler\n");
3746 }
3747 
3748 static struct ena_aenq_handlers aenq_handlers = {
3749 	.handlers = {
3750 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3751 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3752 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
3753 	},
3754 	.unimplemented_handler = unimplemented_aenq_handler
3755 };
3756 
3757 /*********************************************************************
3758  *  Multi-Process communication request handling (in primary)
3759  *********************************************************************/
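/* Runs in the primary process for every request issued by a secondary via
 * ENA_PROXY(): decode the request type, execute the matching ena_com admin
 * call and send the result back with rte_mp_reply().
 */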
3760 static int
3761 ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
3762 {
3763 	const struct ena_mp_body *req =
3764 		(const struct ena_mp_body *)mp_msg->param;
3765 	struct ena_adapter *adapter;
3766 	struct ena_com_dev *ena_dev;
3767 	struct ena_mp_body *rsp;
3768 	struct rte_mp_msg mp_rsp;
3769 	struct rte_eth_dev *dev;
3770 	int res = 0;
3771 
3772 	rsp = (struct ena_mp_body *)&mp_rsp.param;
3773 	mp_msg_init(&mp_rsp, req->type, req->port_id);
3774 
3775 	if (!rte_eth_dev_is_valid_port(req->port_id)) {
3776 		rte_errno = ENODEV;
3777 		res = -rte_errno;
3778 		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
3779 			    req->port_id, req->type);
3780 		goto end;
3781 	}
3782 	dev = &rte_eth_devices[req->port_id];
3783 	adapter = dev->data->dev_private;
3784 	ena_dev = &adapter->ena_dev;
3785 
3786 	switch (req->type) {
3787 	case ENA_MP_DEV_STATS_GET:
3788 		res = ena_com_get_dev_basic_stats(ena_dev,
3789 						  &adapter->basic_stats);
3790 		break;
3791 	case ENA_MP_ENI_STATS_GET:
3792 		res = ena_com_get_eni_stats(ena_dev,
3793 			(struct ena_admin_eni_stats *)&adapter->eni_stats);
3794 		break;
3795 	case ENA_MP_MTU_SET:
3796 		res = ena_com_set_dev_mtu(ena_dev, req->args.mtu);
3797 		break;
3798 	case ENA_MP_IND_TBL_GET:
3799 		res = ena_com_indirect_table_get(ena_dev,
3800 						 adapter->indirect_table);
3801 		break;
3802 	case ENA_MP_IND_TBL_SET:
3803 		res = ena_com_indirect_table_set(ena_dev);
3804 		break;
3805 	default:
3806 		PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
3807 		res = -EINVAL;
3808 		break;
3809 	}
3810 
3811 end:
3812 	/* Save processing result in the reply */
3813 	rsp->result = res;
3814 	/* Return just IPC processing status */
3815 	return rte_mp_reply(&mp_rsp, peer);
3816 }
3817