/* xref: /dpdk/drivers/net/ena/ena_ethdev.c (revision aa022e608d4dec2f9e889aa1ff93fc93ba4b0ffb) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	2
#define DRV_MODULE_VER_SUBMINOR	0

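/*
 * TX and RX rings share a single IO queue index space: TX queue q maps to
 * IO queue 2*q and RX queue q to 2*q + 1, so e.g. RX queue 3 uses IO queue
 * index 7 and ENA_IO_RXQ_IDX_REV(7) recovers queue 3 again.
 */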
#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	((q - 1) / 2)

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

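/*
 * GET_L4_HDR_LEN extracts the TCP data-offset field (the header length in
 * 32-bit words) from the header located l2_len + l3_len bytes into the mbuf;
 * it is used below to fill ena_meta->l4_hdr_len for TSO.
 */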
#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE  7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST             = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

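/*
 * Each xstats table entry pairs a printable name with the byte offset of the
 * counter inside the matching ena_stats_{dev,eni,tx,rx} structure, so any
 * counter can later be located generically from its offset.
 */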
#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append the counter
 * to the name.
 */
rte_atomic32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define	ENA_TX_OFFLOAD_MASK	(\
	PKT_TX_L4_MASK |         \
	PKT_TX_IPV6 |            \
	PKT_TX_IPV4 |            \
	PKT_TX_IP_CKSUM |        \
	PKT_TX_TCP_SEG)

#define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static void ena_tx_cleanup(struct ena_ring *tx_ring);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			      enum ena_ring_type ring_type);
static int ena_queue_start(struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static int ena_copy_eni_stats(struct ena_adapter *adapter);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure        = ena_dev_configure,
	.dev_infos_get        = ena_infos_get,
	.rx_queue_setup       = ena_rx_queue_setup,
	.tx_queue_setup       = ena_tx_queue_setup,
	.dev_start            = ena_start,
	.dev_stop             = ena_stop,
	.link_update          = ena_link_update,
	.stats_get            = ena_stats_get,
	.xstats_get_names     = ena_xstats_get_names,
	.xstats_get           = ena_xstats_get,
	.xstats_get_by_id     = ena_xstats_get_by_id,
	.mtu_set              = ena_mtu_set,
	.rx_queue_release     = ena_rx_queue_release,
	.tx_queue_release     = ena_tx_queue_release,
	.dev_close            = ena_close,
	.dev_reset            = ena_dev_reset,
	.reta_update          = ena_rss_reta_update,
	.reta_query           = ena_rss_reta_query,
};

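/*
 * Generate the default RSS hash key once per process and reuse it for every
 * request, so each port (and each device reset) ends up with the same
 * random key.
 */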
void ena_rss_key_fill(void *key, size_t size)
{
	static bool key_generated;
	static uint8_t default_key[ENA_HASH_KEY_SIZE];
	size_t i;

	RTE_ASSERT(size <= ENA_HASH_KEY_SIZE);

	if (!key_generated) {
		for (i = 0; i < ENA_HASH_KEY_SIZE; ++i)
			default_key[i] = rte_rand() & 0xff;
		key_generated = true;
	}

	rte_memcpy(key, default_key, size);
}

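/*
 * Translate the RX completion context into mbuf metadata: set packet_type
 * from the reported L3/L4 protocols and map the HW checksum results onto
 * PKT_RX_*_CKSUM_* flags. An unchecked or fragmented L4 payload is reported
 * as CKSUM_UNKNOWN rather than good or bad.
 */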
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err))
			ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	else
		if (unlikely(ena_rx_ctx->l4_csum_err))
			ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

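/*
 * Fill the TX context and meta descriptor from the mbuf offload flags. Each
 * offload (TSO, L3 and L4 checksum) is enabled only when both the mbuf
 * requests it and the queue was configured with the matching offload. With
 * meta caching disabled, valid (zeroed) meta must be sent for every packet.
 */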
static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_UDP_CKSUM) &&
				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

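/*
 * An out-of-range (or, on TX, already-completed) req_id means the driver and
 * the device have lost sync. Count the event and schedule a device reset
 * instead of touching an invalid ring entry.
 */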
static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id);

	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	rx_ring->adapter->trigger_reset = true;
	++rx_ring->rx_stats.bad_req_id;

	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n");
	else
		PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}

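/*
 * Report driver identity to the device: the DPDK os_type, rte_version()
 * strings, the packed driver version, the lcore count, and the RX-offset
 * feature bit. Failure here is logged but not fatal.
 */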
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
{
	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
		(dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}

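/*
 * The debug area holds one ETH_GSTRING_LEN-byte name plus one u64 value per
 * xstat, so its size follows directly from ena_xstats_calc_num(). As with
 * the host info, failure to install it is logged but not fatal.
 */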
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->rte_dev);

	/* Allocate 32 bytes for each string and 64 bits for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     adapter);

	/*
	 * The MAC address is not allocated dynamically. Setting it to NULL
	 * prevents rte_eth_dev_release_port() from trying to free it.
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device");

	return rc;
}

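/*
 * Program the 128-entry RSS redirection table. Entries arrive in 64-entry
 * rte_eth_rss_reta_entry64 groups; each selected queue id is translated to
 * its IO queue index via ENA_IO_RXQ_IDX() before being written, and the
 * whole table is then flushed to the device under the admin lock.
 */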
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		PMD_DRV_LOG(WARNING,
			"Requested indirection table size (%d) is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* Each reta_conf element holds 64 entries, so two of them
		 * are needed to cover the 128-entry table.
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       entry_value);
			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
				PMD_DRV_LOG(ERR,
					"Cannot fill indirect table\n");
				return rc;
			}
		}
	}

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_set(ena_dev);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		return rc;
	}

	PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}

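/*
 * Default RSS setup: spread the 128 redirection entries round-robin across
 * the configured RX queues (i % nb_rx_queues) and select CRC32 hashing.
 * ENA_COM_UNSUPPORTED is tolerated since not every device exposes each op.
 */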
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
			PMD_DRV_LOG(ERR, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

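/*
 * Start every configured ring of the given type. On the first failure, stop
 * the rings that were already started so the device is left consistent.
 */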
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_start(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "Failed to start queue %d of type %d",
					     i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

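/*
 * The effective maximum frame length is the device max_mtu, unless jumbo
 * frames are enabled, in which case the configured max_rx_pkt_len wins.
 */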
static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
				  "max mtu: %d, min mtu: %d",
			     max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

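/*
 * Derive the maximum TX/RX queue depths from either the MAX_QUEUES_EXT or
 * the legacy feature descriptor, clamp the TX depth to the LLQ depth when
 * descriptors are pushed to device memory, and round both down to a power
 * of two. Forcing large (256B) LLQ headers halves the usable TX depth.
 */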
static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

	if (use_large_llq_hdr) {
		if ((llq->entry_size_ctrl_supported &
		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    (ena_dev->tx_mem_queue_type ==
		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
			max_tx_queue_size /= 2;
			PMD_INIT_LOG(INFO,
				"Forcing large headers and decreasing maximum TX queue size to %d\n",
				max_tx_queue_size);
		} else {
			PMD_INIT_LOG(ERR,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;

	return 0;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	adapter->drv_stats->rx_drops = 0;
}

static int ena_stats_get(struct rte_eth_dev *dev,
			  struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
		return rc;
	}

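	/*
	 * The device reports 64-bit counters as split 32-bit halves;
	 * __MERGE_64B_H_L() reassembles each pair below.
	 */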
9911173fca2SJan Medala 	/* Set of basic statistics from ENA */
9921173fca2SJan Medala 	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
9931173fca2SJan Medala 					  ena_stats.rx_pkts_low);
9941173fca2SJan Medala 	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
9951173fca2SJan Medala 					  ena_stats.tx_pkts_low);
9961173fca2SJan Medala 	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
9971173fca2SJan Medala 					ena_stats.rx_bytes_low);
9981173fca2SJan Medala 	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
9991173fca2SJan Medala 					ena_stats.tx_bytes_low);
10001173fca2SJan Medala 
10011173fca2SJan Medala 	/* Driver related stats */
1002e1e73e32SMichal Krawczyk 	stats->imissed = adapter->drv_stats->rx_drops;
10031173fca2SJan Medala 	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
10041173fca2SJan Medala 	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
10051173fca2SJan Medala 	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
100645b6d861SMichal Krawczyk 
100745b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
100845b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
100945b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
101045b6d861SMichal Krawczyk 		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;
101145b6d861SMichal Krawczyk 
101245b6d861SMichal Krawczyk 		stats->q_ibytes[i] = rx_stats->bytes;
101345b6d861SMichal Krawczyk 		stats->q_ipackets[i] = rx_stats->cnt;
101445b6d861SMichal Krawczyk 		stats->q_errors[i] = rx_stats->bad_desc_num +
101545b6d861SMichal Krawczyk 			rx_stats->bad_req_id;
101645b6d861SMichal Krawczyk 	}
101745b6d861SMichal Krawczyk 
101845b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
101945b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
102045b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
102145b6d861SMichal Krawczyk 		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;
102245b6d861SMichal Krawczyk 
102345b6d861SMichal Krawczyk 		stats->q_obytes[i] = tx_stats->bytes;
102445b6d861SMichal Krawczyk 		stats->q_opackets[i] = tx_stats->cnt;
102545b6d861SMichal Krawczyk 	}
102645b6d861SMichal Krawczyk 
1027d5b0924bSMatan Azrad 	return 0;
10281173fca2SJan Medala }
10291173fca2SJan Medala 
10301173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
10311173fca2SJan Medala {
10321173fca2SJan Medala 	struct ena_adapter *adapter;
10331173fca2SJan Medala 	struct ena_com_dev *ena_dev;
10341173fca2SJan Medala 	int rc = 0;
10351173fca2SJan Medala 
1036498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
1037498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
1038890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
10391173fca2SJan Medala 
10401173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
1041498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
10421173fca2SJan Medala 
1043241da076SRafal Kozik 	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
10446f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1045241da076SRafal Kozik 			"Invalid MTU setting. new_mtu: %d "
1046241da076SRafal Kozik 			"max mtu: %d min mtu: %d\n",
1047241da076SRafal Kozik 			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
1048241da076SRafal Kozik 		return -EINVAL;
10491173fca2SJan Medala 	}
10501173fca2SJan Medala 
10511173fca2SJan Medala 	rc = ena_com_set_dev_mtu(ena_dev, mtu);
10521173fca2SJan Medala 	if (rc)
10536f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
10541173fca2SJan Medala 	else
10556f1c9df9SStephen Hemminger 		PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu);
10561173fca2SJan Medala 
10571173fca2SJan Medala 	return rc;
10581173fca2SJan Medala }
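
/*
 * Minimal usage sketch (hypothetical application code, not part of this
 * driver): ena_mtu_set() is reached through the generic ethdev API, e.g.:
 *
 *	if (rte_eth_dev_set_mtu(port_id, new_mtu) != 0)
 *		rte_exit(EXIT_FAILURE, "Cannot set MTU\n");
 */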
10591173fca2SJan Medala 
10601173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev)
10611173fca2SJan Medala {
1062890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1063d9b8b106SMichal Krawczyk 	uint64_t ticks;
10641173fca2SJan Medala 	int rc = 0;
10651173fca2SJan Medala 
10661173fca2SJan Medala 	rc = ena_check_valid_conf(adapter);
10671173fca2SJan Medala 	if (rc)
10681173fca2SJan Medala 		return rc;
10691173fca2SJan Medala 
107026e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
10711173fca2SJan Medala 	if (rc)
10721173fca2SJan Medala 		return rc;
10731173fca2SJan Medala 
107426e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
10751173fca2SJan Medala 	if (rc)
107626e5543dSRafal Kozik 		goto err_start_tx;
10771173fca2SJan Medala 
10781173fca2SJan Medala 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
1079361913adSDaria Kolistratova 	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
10801173fca2SJan Medala 		rc = ena_rss_init_default(adapter);
10811173fca2SJan Medala 		if (rc)
108226e5543dSRafal Kozik 			goto err_rss_init;
10831173fca2SJan Medala 	}
10841173fca2SJan Medala 
10851173fca2SJan Medala 	ena_stats_restart(dev);
10861173fca2SJan Medala 
1087d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
1088d9b8b106SMichal Krawczyk 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1089d9b8b106SMichal Krawczyk 
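	/* Arm the watchdog: rte_get_timer_hz() cycles equals one second, so
	 * ena_timer_wd_callback() fires periodically about once per second.
	 */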
1090d9b8b106SMichal Krawczyk 	ticks = rte_get_timer_hz();
1091d9b8b106SMichal Krawczyk 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1092d9b8b106SMichal Krawczyk 			ena_timer_wd_callback, adapter);
1093d9b8b106SMichal Krawczyk 
10947830e905SSolganik Alexander 	++adapter->dev_stats.dev_start;
10951173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
10961173fca2SJan Medala 
10971173fca2SJan Medala 	return 0;
109826e5543dSRafal Kozik 
109926e5543dSRafal Kozik err_rss_init:
110026e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
110126e5543dSRafal Kozik err_start_tx:
110226e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
110326e5543dSRafal Kozik 	return rc;
11041173fca2SJan Medala }
11051173fca2SJan Medala 
110662024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev)
1107eb0ef49dSMichal Krawczyk {
1108890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1109e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1110e457bc70SRafal Kozik 	int rc;
1111eb0ef49dSMichal Krawczyk 
1112d9b8b106SMichal Krawczyk 	rte_timer_stop_sync(&adapter->timer_wd);
111326e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
111426e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1115d9b8b106SMichal Krawczyk 
1116e457bc70SRafal Kozik 	if (adapter->trigger_reset) {
1117e457bc70SRafal Kozik 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1118e457bc70SRafal Kozik 		if (rc)
11196f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc);
1120e457bc70SRafal Kozik 	}
1121e457bc70SRafal Kozik 
11227830e905SSolganik Alexander 	++adapter->dev_stats.dev_stop;
1123eb0ef49dSMichal Krawczyk 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1124b8f5d2aeSThomas Monjalon 	dev->data->dev_started = 0;
112562024eb8SIvan Ilchenko 
112662024eb8SIvan Ilchenko 	return 0;
1127eb0ef49dSMichal Krawczyk }
1128eb0ef49dSMichal Krawczyk 
1129df238f84SMichal Krawczyk static int ena_create_io_queue(struct ena_ring *ring)
1130df238f84SMichal Krawczyk {
1131df238f84SMichal Krawczyk 	struct ena_adapter *adapter;
1132df238f84SMichal Krawczyk 	struct ena_com_dev *ena_dev;
1133df238f84SMichal Krawczyk 	struct ena_com_create_io_ctx ctx =
1134df238f84SMichal Krawczyk 		/* policy set to _HOST just to satisfy icc compiler */
1135df238f84SMichal Krawczyk 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1136df238f84SMichal Krawczyk 		  0, 0, 0, 0, 0 };
1137df238f84SMichal Krawczyk 	uint16_t ena_qid;
1138778677dcSRafal Kozik 	unsigned int i;
1139df238f84SMichal Krawczyk 	int rc;
1140df238f84SMichal Krawczyk 
1141df238f84SMichal Krawczyk 	adapter = ring->adapter;
1142df238f84SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1143df238f84SMichal Krawczyk 
1144df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX) {
1145df238f84SMichal Krawczyk 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1146df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1147df238f84SMichal Krawczyk 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1148778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1149778677dcSRafal Kozik 			ring->empty_tx_reqs[i] = i;
1150df238f84SMichal Krawczyk 	} else {
1151df238f84SMichal Krawczyk 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1152df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1153778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1154778677dcSRafal Kozik 			ring->empty_rx_reqs[i] = i;
1155df238f84SMichal Krawczyk 	}
1156badc3a6aSMichal Krawczyk 	ctx.queue_size = ring->ring_size;
1157df238f84SMichal Krawczyk 	ctx.qid = ena_qid;
1158df238f84SMichal Krawczyk 	ctx.msix_vector = -1; /* interrupts not used */
11594217cb0bSMichal Krawczyk 	ctx.numa_node = ring->numa_socket_id;
1160df238f84SMichal Krawczyk 
1161df238f84SMichal Krawczyk 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1162df238f84SMichal Krawczyk 	if (rc) {
11636f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1164df238f84SMichal Krawczyk 			"failed to create io queue #%d (qid:%d) rc: %d\n",
1165df238f84SMichal Krawczyk 			ring->id, ena_qid, rc);
1166df238f84SMichal Krawczyk 		return rc;
1167df238f84SMichal Krawczyk 	}
1168df238f84SMichal Krawczyk 
1169df238f84SMichal Krawczyk 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1170df238f84SMichal Krawczyk 				     &ring->ena_com_io_sq,
1171df238f84SMichal Krawczyk 				     &ring->ena_com_io_cq);
1172df238f84SMichal Krawczyk 	if (rc) {
11736f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1174df238f84SMichal Krawczyk 			"Failed to get io queue handlers. queue num %d rc: %d\n",
1175df238f84SMichal Krawczyk 			ring->id, rc);
1176df238f84SMichal Krawczyk 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1177df238f84SMichal Krawczyk 		return rc;
1178df238f84SMichal Krawczyk 	}
1179df238f84SMichal Krawczyk 
1180df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX)
1181df238f84SMichal Krawczyk 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1182df238f84SMichal Krawczyk 
1183df238f84SMichal Krawczyk 	return 0;
1184df238f84SMichal Krawczyk }
1185df238f84SMichal Krawczyk 
118626e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring)
1187df238f84SMichal Krawczyk {
118826e5543dSRafal Kozik 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1189df238f84SMichal Krawczyk 
119026e5543dSRafal Kozik 	if (ring->type == ENA_RING_TYPE_RX) {
119126e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
119226e5543dSRafal Kozik 		ena_rx_queue_release_bufs(ring);
119326e5543dSRafal Kozik 	} else {
119426e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
119526e5543dSRafal Kozik 		ena_tx_queue_release_bufs(ring);
1196df238f84SMichal Krawczyk 	}
1197df238f84SMichal Krawczyk }
1198df238f84SMichal Krawczyk 
119926e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev,
120026e5543dSRafal Kozik 			      enum ena_ring_type ring_type)
120126e5543dSRafal Kozik {
1202890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
120326e5543dSRafal Kozik 	struct ena_ring *queues = NULL;
120426e5543dSRafal Kozik 	uint16_t nb_queues, i;
120526e5543dSRafal Kozik 
120626e5543dSRafal Kozik 	if (ring_type == ENA_RING_TYPE_RX) {
120726e5543dSRafal Kozik 		queues = adapter->rx_ring;
120826e5543dSRafal Kozik 		nb_queues = dev->data->nb_rx_queues;
120926e5543dSRafal Kozik 	} else {
121026e5543dSRafal Kozik 		queues = adapter->tx_ring;
121126e5543dSRafal Kozik 		nb_queues = dev->data->nb_tx_queues;
121226e5543dSRafal Kozik 	}
121326e5543dSRafal Kozik 
121426e5543dSRafal Kozik 	for (i = 0; i < nb_queues; ++i)
121526e5543dSRafal Kozik 		if (queues[i].configured)
121626e5543dSRafal Kozik 			ena_queue_stop(&queues[i]);
121726e5543dSRafal Kozik }
121826e5543dSRafal Kozik 
121926e5543dSRafal Kozik static int ena_queue_start(struct ena_ring *ring)
12201173fca2SJan Medala {
1221a467e8f3SMichal Krawczyk 	int rc, bufs_num;
12221173fca2SJan Medala 
12231173fca2SJan Medala 	ena_assert_msg(ring->configured == 1,
122426e5543dSRafal Kozik 		       "Trying to start unconfigured queue\n");
12251173fca2SJan Medala 
1226df238f84SMichal Krawczyk 	rc = ena_create_io_queue(ring);
1227df238f84SMichal Krawczyk 	if (rc) {
1228498c687aSRafal Kozik 		PMD_INIT_LOG(ERR, "Failed to create IO queue!");
1229df238f84SMichal Krawczyk 		return rc;
1230df238f84SMichal Krawczyk 	}
1231df238f84SMichal Krawczyk 
12321173fca2SJan Medala 	ring->next_to_clean = 0;
12331173fca2SJan Medala 	ring->next_to_use = 0;
12341173fca2SJan Medala 
12357830e905SSolganik Alexander 	if (ring->type == ENA_RING_TYPE_TX) {
12367830e905SSolganik Alexander 		ring->tx_stats.available_desc =
1237b2b02edeSMichal Krawczyk 			ena_com_free_q_entries(ring->ena_com_io_sq);
12381173fca2SJan Medala 		return 0;
12397830e905SSolganik Alexander 	}
12401173fca2SJan Medala 
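	/* Fill all but one descriptor; keeping a single slot unused is the
	 * usual ring convention for telling a full queue from an empty one.
	 */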
1241a467e8f3SMichal Krawczyk 	bufs_num = ring->ring_size - 1;
1242a467e8f3SMichal Krawczyk 	rc = ena_populate_rx_queue(ring, bufs_num);
1243a467e8f3SMichal Krawczyk 	if (rc != bufs_num) {
124426e5543dSRafal Kozik 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
124526e5543dSRafal Kozik 					 ENA_IO_RXQ_IDX(ring->id));
1246f2462150SFerruh Yigit 		PMD_INIT_LOG(ERR, "Failed to populate rx ring!");
1247241da076SRafal Kozik 		return ENA_COM_FAULT;
12481173fca2SJan Medala 	}
12491173fca2SJan Medala 
12501173fca2SJan Medala 	return 0;
12511173fca2SJan Medala }
12521173fca2SJan Medala 
12531173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev,
12541173fca2SJan Medala 			      uint16_t queue_idx,
12551173fca2SJan Medala 			      uint16_t nb_desc,
12564217cb0bSMichal Krawczyk 			      unsigned int socket_id,
125756b8b9b7SRafal Kozik 			      const struct rte_eth_txconf *tx_conf)
12581173fca2SJan Medala {
12591173fca2SJan Medala 	struct ena_ring *txq = NULL;
1260890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
12611173fca2SJan Medala 	unsigned int i;
12621173fca2SJan Medala 
12631173fca2SJan Medala 	txq = &adapter->tx_ring[queue_idx];
12641173fca2SJan Medala 
12651173fca2SJan Medala 	if (txq->configured) {
12666f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
12671173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
12681173fca2SJan Medala 			queue_idx);
1269241da076SRafal Kozik 		return ENA_COM_FAULT;
12701173fca2SJan Medala 	}
12711173fca2SJan Medala 
12721daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
12736f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1274498c687aSRafal Kozik 			"Unsupported size of TX queue: %d is not a power of 2.\n",
12751daff526SJakub Palider 			nb_desc);
12761daff526SJakub Palider 		return -EINVAL;
12771daff526SJakub Palider 	}
12781daff526SJakub Palider 
12795920d930SMichal Krawczyk 	if (nb_desc > adapter->max_tx_ring_size) {
12806f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
12811173fca2SJan Medala 			"Unsupported size of TX queue (max size: %d)\n",
12825920d930SMichal Krawczyk 			adapter->max_tx_ring_size);
12831173fca2SJan Medala 		return -EINVAL;
12841173fca2SJan Medala 	}
12851173fca2SJan Medala 
1286ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
12875920d930SMichal Krawczyk 		nb_desc = adapter->max_tx_ring_size;
1288ea93d37eSRafal Kozik 
12891173fca2SJan Medala 	txq->port_id = dev->data->port_id;
12901173fca2SJan Medala 	txq->next_to_clean = 0;
12911173fca2SJan Medala 	txq->next_to_use = 0;
12921173fca2SJan Medala 	txq->ring_size = nb_desc;
1293c0006061SMichal Krawczyk 	txq->size_mask = nb_desc - 1;
12944217cb0bSMichal Krawczyk 	txq->numa_socket_id = socket_id;
12951173fca2SJan Medala 
12961173fca2SJan Medala 	txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
12971173fca2SJan Medala 					  sizeof(struct ena_tx_buffer) *
12981173fca2SJan Medala 					  txq->ring_size,
12991173fca2SJan Medala 					  RTE_CACHE_LINE_SIZE);
13001173fca2SJan Medala 	if (!txq->tx_buffer_info) {
13016f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n");
1302df238f84SMichal Krawczyk 		return -ENOMEM;
13031173fca2SJan Medala 	}
13041173fca2SJan Medala 
13051173fca2SJan Medala 	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
13061173fca2SJan Medala 					 sizeof(u16) * txq->ring_size,
13071173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
13081173fca2SJan Medala 	if (!txq->empty_tx_reqs) {
13096f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n");
1310df238f84SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
1311df238f84SMichal Krawczyk 		return -ENOMEM;
13121173fca2SJan Medala 	}
1313241da076SRafal Kozik 
13142fca2a98SMichal Krawczyk 	txq->push_buf_intermediate_buf =
13152fca2a98SMichal Krawczyk 		rte_zmalloc("txq->push_buf_intermediate_buf",
13162fca2a98SMichal Krawczyk 			    txq->tx_max_header_size,
13172fca2a98SMichal Krawczyk 			    RTE_CACHE_LINE_SIZE);
13182fca2a98SMichal Krawczyk 	if (!txq->push_buf_intermediate_buf) {
13196f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n");
13202fca2a98SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
13212fca2a98SMichal Krawczyk 		rte_free(txq->empty_tx_reqs);
13222fca2a98SMichal Krawczyk 		return -ENOMEM;
13232fca2a98SMichal Krawczyk 	}
13242fca2a98SMichal Krawczyk 
13251173fca2SJan Medala 	for (i = 0; i < txq->ring_size; i++)
13261173fca2SJan Medala 		txq->empty_tx_reqs[i] = i;
13271173fca2SJan Medala 
13282081d5e2SMichal Krawczyk 	if (tx_conf != NULL) {
13292081d5e2SMichal Krawczyk 		txq->offloads =
13302081d5e2SMichal Krawczyk 			tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
13312081d5e2SMichal Krawczyk 	}
13321173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
13331173fca2SJan Medala 	txq->configured = 1;
13341173fca2SJan Medala 	dev->data->tx_queues[queue_idx] = txq;
1335241da076SRafal Kozik 
1336241da076SRafal Kozik 	return 0;
13371173fca2SJan Medala }
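
/*
 * Illustrative call path (hypothetical application code): this routine is
 * reached through the ethdev API, and nb_desc must be a power of 2, e.g.:
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *	rte_eth_tx_queue_setup(port_id, queue_idx, 1024, rte_socket_id(),
 *			       &txconf);
 */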
13381173fca2SJan Medala 
13391173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev,
13401173fca2SJan Medala 			      uint16_t queue_idx,
13411173fca2SJan Medala 			      uint16_t nb_desc,
13424217cb0bSMichal Krawczyk 			      unsigned int socket_id,
1343a4996bd8SWei Dai 			      __rte_unused const struct rte_eth_rxconf *rx_conf,
13441173fca2SJan Medala 			      struct rte_mempool *mp)
13451173fca2SJan Medala {
1346890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
13471173fca2SJan Medala 	struct ena_ring *rxq = NULL;
134838364c26SMichal Krawczyk 	size_t buffer_size;
1349df238f84SMichal Krawczyk 	int i;
13501173fca2SJan Medala 
13511173fca2SJan Medala 	rxq = &adapter->rx_ring[queue_idx];
13521173fca2SJan Medala 	if (rxq->configured) {
13536f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
13541173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
13551173fca2SJan Medala 			queue_idx);
1356241da076SRafal Kozik 		return ENA_COM_FAULT;
13571173fca2SJan Medala 	}
13581173fca2SJan Medala 
1359ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
13605920d930SMichal Krawczyk 		nb_desc = adapter->max_rx_ring_size;
1361ea93d37eSRafal Kozik 
13621daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
13636f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1364498c687aSRafal Kozik 			"Unsupported size of RX queue: %d is not a power of 2.\n",
13651daff526SJakub Palider 			nb_desc);
13661daff526SJakub Palider 		return -EINVAL;
13671daff526SJakub Palider 	}
13681daff526SJakub Palider 
13695920d930SMichal Krawczyk 	if (nb_desc > adapter->max_rx_ring_size) {
13706f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
13711173fca2SJan Medala 			"Unsupported size of RX queue (max size: %d)\n",
13725920d930SMichal Krawczyk 			adapter->max_rx_ring_size);
13731173fca2SJan Medala 		return -EINVAL;
13741173fca2SJan Medala 	}
13751173fca2SJan Medala 
137638364c26SMichal Krawczyk 	/* ENA doesn't support buffers smaller than 1400 bytes */
137738364c26SMichal Krawczyk 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
137838364c26SMichal Krawczyk 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
137938364c26SMichal Krawczyk 		PMD_DRV_LOG(ERR,
138038364c26SMichal Krawczyk 			"Unsupported size of RX buffer: %zu (min size: %d)\n",
138138364c26SMichal Krawczyk 			buffer_size, ENA_RX_BUF_MIN_SIZE);
138238364c26SMichal Krawczyk 		return -EINVAL;
138338364c26SMichal Krawczyk 	}
138438364c26SMichal Krawczyk 
13851173fca2SJan Medala 	rxq->port_id = dev->data->port_id;
13861173fca2SJan Medala 	rxq->next_to_clean = 0;
13871173fca2SJan Medala 	rxq->next_to_use = 0;
13881173fca2SJan Medala 	rxq->ring_size = nb_desc;
1389c0006061SMichal Krawczyk 	rxq->size_mask = nb_desc - 1;
13904217cb0bSMichal Krawczyk 	rxq->numa_socket_id = socket_id;
13911173fca2SJan Medala 	rxq->mb_pool = mp;
13921173fca2SJan Medala 
13931173fca2SJan Medala 	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
13941be097dcSMichal Krawczyk 		sizeof(struct ena_rx_buffer) * nb_desc,
13951173fca2SJan Medala 		RTE_CACHE_LINE_SIZE);
13961173fca2SJan Medala 	if (!rxq->rx_buffer_info) {
13976f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
13981173fca2SJan Medala 		return -ENOMEM;
13991173fca2SJan Medala 	}
14001173fca2SJan Medala 
140179405ee1SRafal Kozik 	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
140279405ee1SRafal Kozik 					    sizeof(struct rte_mbuf *) * nb_desc,
140379405ee1SRafal Kozik 					    RTE_CACHE_LINE_SIZE);
140479405ee1SRafal Kozik 
140579405ee1SRafal Kozik 	if (!rxq->rx_refill_buffer) {
14066f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n");
140779405ee1SRafal Kozik 		rte_free(rxq->rx_buffer_info);
140879405ee1SRafal Kozik 		rxq->rx_buffer_info = NULL;
140979405ee1SRafal Kozik 		return -ENOMEM;
141079405ee1SRafal Kozik 	}
141179405ee1SRafal Kozik 
1412c2034976SMichal Krawczyk 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
1413c2034976SMichal Krawczyk 					 sizeof(uint16_t) * nb_desc,
1414c2034976SMichal Krawczyk 					 RTE_CACHE_LINE_SIZE);
1415c2034976SMichal Krawczyk 	if (!rxq->empty_rx_reqs) {
14166f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n");
1417c2034976SMichal Krawczyk 		rte_free(rxq->rx_buffer_info);
1418c2034976SMichal Krawczyk 		rxq->rx_buffer_info = NULL;
141979405ee1SRafal Kozik 		rte_free(rxq->rx_refill_buffer);
142079405ee1SRafal Kozik 		rxq->rx_refill_buffer = NULL;
1421c2034976SMichal Krawczyk 		return -ENOMEM;
1422c2034976SMichal Krawczyk 	}
1423c2034976SMichal Krawczyk 
1424c2034976SMichal Krawczyk 	for (i = 0; i < nb_desc; i++)
1425eccbe2ffSRafal Kozik 		rxq->empty_rx_reqs[i] = i;
1426c2034976SMichal Krawczyk 
14271173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
14281173fca2SJan Medala 	rxq->configured = 1;
14291173fca2SJan Medala 	dev->data->rx_queues[queue_idx] = rxq;
14301173fca2SJan Medala 
1431df238f84SMichal Krawczyk 	return 0;
14321173fca2SJan Medala }
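
/*
 * Mempool sizing note (illustrative, assuming the default headroom): the
 * buffer_size check above means the Rx mempool must be created with a data
 * room of at least ENA_RX_BUF_MIN_SIZE + RTE_PKTMBUF_HEADROOM bytes, e.g.:
 *
 *	rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *		ENA_RX_BUF_MIN_SIZE + RTE_PKTMBUF_HEADROOM, rte_socket_id());
 */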
14331173fca2SJan Medala 
143483fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
143583fd97b2SMichal Krawczyk 				  struct rte_mbuf *mbuf, uint16_t id)
143683fd97b2SMichal Krawczyk {
143783fd97b2SMichal Krawczyk 	struct ena_com_buf ebuf;
143883fd97b2SMichal Krawczyk 	int rc;
143983fd97b2SMichal Krawczyk 
144083fd97b2SMichal Krawczyk 	/* prepare physical address for DMA transaction */
144183fd97b2SMichal Krawczyk 	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
144283fd97b2SMichal Krawczyk 	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
144383fd97b2SMichal Krawczyk 
144483fd97b2SMichal Krawczyk 	/* pass resource to device */
144583fd97b2SMichal Krawczyk 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
144683fd97b2SMichal Krawczyk 	if (unlikely(rc != 0))
144783fd97b2SMichal Krawczyk 		PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
144883fd97b2SMichal Krawczyk 
144983fd97b2SMichal Krawczyk 	return rc;
145083fd97b2SMichal Krawczyk }
145183fd97b2SMichal Krawczyk 
14521173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
14531173fca2SJan Medala {
14541173fca2SJan Medala 	unsigned int i;
14551173fca2SJan Medala 	int rc;
14561daff526SJakub Palider 	uint16_t next_to_use = rxq->next_to_use;
1457c2034976SMichal Krawczyk 	uint16_t in_use, req_id;
145879405ee1SRafal Kozik 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
14591173fca2SJan Medala 
14601173fca2SJan Medala 	if (unlikely(!count))
14611173fca2SJan Medala 		return 0;
14621173fca2SJan Medala 
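	/* in_use counts descriptors currently held by the device; together
	 * with the refill count it must stay below the ring size.
	 */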
1463c0006061SMichal Krawczyk 	in_use = rxq->ring_size - 1 -
1464c0006061SMichal Krawczyk 		ena_com_free_q_entries(rxq->ena_com_io_sq);
1465c0006061SMichal Krawczyk 	ena_assert_msg(((in_use + count) < rxq->ring_size),
1466c0006061SMichal Krawczyk 		"bad ring state\n");
14671173fca2SJan Medala 
14681173fca2SJan Medala 	/* get resources for incoming packets */
146979405ee1SRafal Kozik 	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
14701173fca2SJan Medala 	if (unlikely(rc < 0)) {
14711173fca2SJan Medala 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
14727830e905SSolganik Alexander 		++rxq->rx_stats.mbuf_alloc_fail;
14731173fca2SJan Medala 		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
14741173fca2SJan Medala 		return 0;
14751173fca2SJan Medala 	}
14761173fca2SJan Medala 
14771173fca2SJan Medala 	for (i = 0; i < count; i++) {
147879405ee1SRafal Kozik 		struct rte_mbuf *mbuf = mbufs[i];
14791be097dcSMichal Krawczyk 		struct ena_rx_buffer *rx_info;
14801173fca2SJan Medala 
148179405ee1SRafal Kozik 		if (likely((i + 4) < count))
148279405ee1SRafal Kozik 			rte_prefetch0(mbufs[i + 4]);
1483c2034976SMichal Krawczyk 
1484c0006061SMichal Krawczyk 		req_id = rxq->empty_rx_reqs[next_to_use];
1485241da076SRafal Kozik 		rc = validate_rx_req_id(rxq, req_id);
14861be097dcSMichal Krawczyk 		if (unlikely(rc))
1487241da076SRafal Kozik 			break;
14881be097dcSMichal Krawczyk 
14891be097dcSMichal Krawczyk 		rx_info = &rxq->rx_buffer_info[req_id];
1490241da076SRafal Kozik 
149183fd97b2SMichal Krawczyk 		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
149283fd97b2SMichal Krawczyk 		if (unlikely(rc != 0))
14931173fca2SJan Medala 			break;
149483fd97b2SMichal Krawczyk 
14951be097dcSMichal Krawczyk 		rx_info->mbuf = mbuf;
1496c0006061SMichal Krawczyk 		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
14971173fca2SJan Medala 	}
14981173fca2SJan Medala 
149979405ee1SRafal Kozik 	if (unlikely(i < count)) {
15006f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d "
1501241da076SRafal Kozik 			"buffers (from %d)\n", rxq->id, i, count);
150279405ee1SRafal Kozik 		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
150379405ee1SRafal Kozik 				     count - i);
15047830e905SSolganik Alexander 		++rxq->rx_stats.refill_partial;
150579405ee1SRafal Kozik 	}
1506241da076SRafal Kozik 
15075e02e19eSJan Medala 	/* When we submitted free resources to the device... */
15083d19e1abSRafal Kozik 	if (likely(i > 0)) {
150938faa87eSMichal Krawczyk 		/* ...let HW know that it can fill buffers with data. */
15101173fca2SJan Medala 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
15111173fca2SJan Medala 
15125e02e19eSJan Medala 		rxq->next_to_use = next_to_use;
15135e02e19eSJan Medala 	}
15145e02e19eSJan Medala 
15151173fca2SJan Medala 	return i;
15161173fca2SJan Medala }
15171173fca2SJan Medala 
15181173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev,
1519e859d2b8SRafal Kozik 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
1520e859d2b8SRafal Kozik 			   bool *wd_state)
15211173fca2SJan Medala {
1522ca148440SMichal Krawczyk 	uint32_t aenq_groups;
15231173fca2SJan Medala 	int rc;
1524c4144557SJan Medala 	bool readless_supported;
15251173fca2SJan Medala 
15261173fca2SJan Medala 	/* Initialize mmio registers */
15271173fca2SJan Medala 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
15281173fca2SJan Medala 	if (rc) {
15296f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to init mmio read less\n");
15301173fca2SJan Medala 		return rc;
15311173fca2SJan Medala 	}
15321173fca2SJan Medala 
1533c4144557SJan Medala 	/* The PCIe configuration space revision id indicates if mmio reg
1534c4144557SJan Medala 	 * read is disabled.
1535c4144557SJan Medala 	 */
1536c4144557SJan Medala 	readless_supported =
1537c4144557SJan Medala 		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
1538c4144557SJan Medala 			       & ENA_MMIO_DISABLE_REG_READ);
1539c4144557SJan Medala 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1540c4144557SJan Medala 
15411173fca2SJan Medala 	/* reset device */
15423adcba9aSMichal Krawczyk 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
15431173fca2SJan Medala 	if (rc) {
15446f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "cannot reset device\n");
15451173fca2SJan Medala 		goto err_mmio_read_less;
15461173fca2SJan Medala 	}
15471173fca2SJan Medala 
15481173fca2SJan Medala 	/* check FW version */
15491173fca2SJan Medala 	rc = ena_com_validate_version(ena_dev);
15501173fca2SJan Medala 	if (rc) {
15516f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "device version is too low\n");
15521173fca2SJan Medala 		goto err_mmio_read_less;
15531173fca2SJan Medala 	}
15541173fca2SJan Medala 
15551173fca2SJan Medala 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
15561173fca2SJan Medala 
15571173fca2SJan Medala 	/* ENA device administration layer init */
1558b68309beSRafal Kozik 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
15591173fca2SJan Medala 	if (rc) {
15606f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15611173fca2SJan Medala 			"cannot initialize ena admin queue with device\n");
15621173fca2SJan Medala 		goto err_mmio_read_less;
15631173fca2SJan Medala 	}
15641173fca2SJan Medala 
15651173fca2SJan Medala 	/* To enable the msix interrupts the driver needs to know the number
15661173fca2SJan Medala 	 * of queues. So the driver uses polling mode to retrieve this
15671173fca2SJan Medala 	 * information.
15681173fca2SJan Medala 	 */
15691173fca2SJan Medala 	ena_com_set_admin_polling_mode(ena_dev, true);
15701173fca2SJan Medala 
1571201ff2e5SJakub Palider 	ena_config_host_info(ena_dev);
1572201ff2e5SJakub Palider 
15731173fca2SJan Medala 	/* Get Device Attributes and features */
15741173fca2SJan Medala 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
15751173fca2SJan Medala 	if (rc) {
15766f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15771173fca2SJan Medala 			"cannot get attribute for ena device, rc: %d\n", rc);
15781173fca2SJan Medala 		goto err_admin_init;
15791173fca2SJan Medala 	}
15801173fca2SJan Medala 
1581f01f060cSRafal Kozik 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1582d9b8b106SMichal Krawczyk 		      BIT(ENA_ADMIN_NOTIFICATION) |
1583983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1584983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1585983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_WARNING);
1586ca148440SMichal Krawczyk 
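	/* Keep only the AENQ groups that the device also advertises. */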
1587ca148440SMichal Krawczyk 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1588ca148440SMichal Krawczyk 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
1589ca148440SMichal Krawczyk 	if (rc) {
15906f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc);
1591ca148440SMichal Krawczyk 		goto err_admin_init;
1592ca148440SMichal Krawczyk 	}
1593ca148440SMichal Krawczyk 
1594e859d2b8SRafal Kozik 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
1595e859d2b8SRafal Kozik 
15961173fca2SJan Medala 	return 0;
15971173fca2SJan Medala 
15981173fca2SJan Medala err_admin_init:
15991173fca2SJan Medala 	ena_com_admin_destroy(ena_dev);
16001173fca2SJan Medala 
16011173fca2SJan Medala err_mmio_read_less:
16021173fca2SJan Medala 	ena_com_mmio_reg_read_request_destroy(ena_dev);
16031173fca2SJan Medala 
16041173fca2SJan Medala 	return rc;
16051173fca2SJan Medala }
16061173fca2SJan Medala 
1607ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg)
160815773e06SMichal Krawczyk {
1609890728ffSStephen Hemminger 	struct ena_adapter *adapter = cb_arg;
161015773e06SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
161115773e06SMichal Krawczyk 
161215773e06SMichal Krawczyk 	ena_com_admin_q_comp_intr_handler(ena_dev);
16133d19e1abSRafal Kozik 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1614ca148440SMichal Krawczyk 		ena_com_aenq_intr_handler(ena_dev, adapter);
161515773e06SMichal Krawczyk }
161615773e06SMichal Krawczyk 
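/*
 * The keep-alive AENQ event refreshes adapter->timestamp_wd (its handler is
 * not part of this fragment); if no refresh arrives within
 * keep_alive_timeout timer cycles, the check below schedules a device reset.
 */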
16175efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter)
16185efb9fc7SMichal Krawczyk {
1619e859d2b8SRafal Kozik 	if (!adapter->wd_state)
1620e859d2b8SRafal Kozik 		return;
1621e859d2b8SRafal Kozik 
16225efb9fc7SMichal Krawczyk 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
16235efb9fc7SMichal Krawczyk 		return;
16245efb9fc7SMichal Krawczyk 
16255efb9fc7SMichal Krawczyk 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
16265efb9fc7SMichal Krawczyk 	    adapter->keep_alive_timeout)) {
16276f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
16285efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
16295efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16307830e905SSolganik Alexander 		++adapter->dev_stats.wd_expired;
16315efb9fc7SMichal Krawczyk 	}
16325efb9fc7SMichal Krawczyk }
16335efb9fc7SMichal Krawczyk 
16345efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */
16355efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter)
16365efb9fc7SMichal Krawczyk {
16375efb9fc7SMichal Krawczyk 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
16386f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n");
16395efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
16405efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16415efb9fc7SMichal Krawczyk 	}
16425efb9fc7SMichal Krawczyk }
16435efb9fc7SMichal Krawczyk 
1644d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1645d9b8b106SMichal Krawczyk 				  void *arg)
1646d9b8b106SMichal Krawczyk {
1647890728ffSStephen Hemminger 	struct ena_adapter *adapter = arg;
1648d9b8b106SMichal Krawczyk 	struct rte_eth_dev *dev = adapter->rte_dev;
1649d9b8b106SMichal Krawczyk 
16505efb9fc7SMichal Krawczyk 	check_for_missing_keep_alive(adapter);
16515efb9fc7SMichal Krawczyk 	check_for_admin_com_state(adapter);
1652d9b8b106SMichal Krawczyk 
16535efb9fc7SMichal Krawczyk 	if (unlikely(adapter->trigger_reset)) {
16546f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
16555723fbedSFerruh Yigit 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1656d9b8b106SMichal Krawczyk 			NULL);
1657d9b8b106SMichal Krawczyk 	}
1658d9b8b106SMichal Krawczyk }
1659d9b8b106SMichal Krawczyk 
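/*
 * LLQ (Low Latency Queue) ring entries carry Tx descriptors together with
 * the start of the packet. A 256B entry is selected only when large LLQ
 * headers were requested and the device reports support for them; otherwise
 * the default 128B entry size is used.
 */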
16602fca2a98SMichal Krawczyk static inline void
16618a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config,
16628a7a73f2SMichal Krawczyk 			       struct ena_admin_feature_llq_desc *llq,
16638a7a73f2SMichal Krawczyk 			       bool use_large_llq_hdr)
16642fca2a98SMichal Krawczyk {
16652fca2a98SMichal Krawczyk 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
16662fca2a98SMichal Krawczyk 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
16672fca2a98SMichal Krawczyk 	llq_config->llq_num_decs_before_header =
16682fca2a98SMichal Krawczyk 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
16698a7a73f2SMichal Krawczyk 
16708a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr &&
16718a7a73f2SMichal Krawczyk 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
16728a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16738a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
16748a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 256;
16758a7a73f2SMichal Krawczyk 	} else {
16768a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16778a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
16782fca2a98SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 128;
16792fca2a98SMichal Krawczyk 	}
16808a7a73f2SMichal Krawczyk }
16812fca2a98SMichal Krawczyk 
16822fca2a98SMichal Krawczyk static int
16832fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter,
16842fca2a98SMichal Krawczyk 				struct ena_com_dev *ena_dev,
16852fca2a98SMichal Krawczyk 				struct ena_admin_feature_llq_desc *llq,
16862fca2a98SMichal Krawczyk 				struct ena_llq_configurations *llq_default_configurations)
16872fca2a98SMichal Krawczyk {
16882fca2a98SMichal Krawczyk 	int rc;
16892fca2a98SMichal Krawczyk 	u32 llq_feature_mask;
16902fca2a98SMichal Krawczyk 
16912fca2a98SMichal Krawczyk 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
16922fca2a98SMichal Krawczyk 	if (!(ena_dev->supported_features & llq_feature_mask)) {
16936f1c9df9SStephen Hemminger 		PMD_DRV_LOG(INFO,
16942fca2a98SMichal Krawczyk 			"LLQ is not supported. Fallback to host mode policy.\n");
16952fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16962fca2a98SMichal Krawczyk 		return 0;
16972fca2a98SMichal Krawczyk 	}
16982fca2a98SMichal Krawczyk 
16992fca2a98SMichal Krawczyk 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
17002fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
17012fca2a98SMichal Krawczyk 		PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
1702498c687aSRafal Kozik 			"Fallback to host mode policy.");
17032fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
17042fca2a98SMichal Krawczyk 		return 0;
17052fca2a98SMichal Krawczyk 	}
17062fca2a98SMichal Krawczyk 
17072fca2a98SMichal Krawczyk 	/* Nothing to config, exit */
17082fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
17092fca2a98SMichal Krawczyk 		return 0;
17102fca2a98SMichal Krawczyk 
17112fca2a98SMichal Krawczyk 	if (!adapter->dev_mem_base) {
17126f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Unable to access LLQ bar resource. "
17132fca2a98SMichal Krawczyk 			"Fallback to host mode policy.\n");
17142fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
17152fca2a98SMichal Krawczyk 		return 0;
17162fca2a98SMichal Krawczyk 	}
17172fca2a98SMichal Krawczyk 
17182fca2a98SMichal Krawczyk 	ena_dev->mem_bar = adapter->dev_mem_base;
17192fca2a98SMichal Krawczyk 
17202fca2a98SMichal Krawczyk 	return 0;
17212fca2a98SMichal Krawczyk }
17222fca2a98SMichal Krawczyk 
17235920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
172401bd6877SRafal Kozik 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
172501bd6877SRafal Kozik {
17265920d930SMichal Krawczyk 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
172701bd6877SRafal Kozik 
1728ea93d37eSRafal Kozik 	/* Regular queues capabilities */
1729ea93d37eSRafal Kozik 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1730ea93d37eSRafal Kozik 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1731ea93d37eSRafal Kozik 			&get_feat_ctx->max_queue_ext.max_queue_ext;
17322fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
17332fca2a98SMichal Krawczyk 				    max_queue_ext->max_rx_cq_num);
17342fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
17352fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
1736ea93d37eSRafal Kozik 	} else {
1737ea93d37eSRafal Kozik 		struct ena_admin_queue_feature_desc *max_queues =
1738ea93d37eSRafal Kozik 			&get_feat_ctx->max_queues;
17392fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queues->max_sq_num;
17402fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queues->max_cq_num;
17412fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
1742ea93d37eSRafal Kozik 	}
174301bd6877SRafal Kozik 
17442fca2a98SMichal Krawczyk 	/* In case of LLQ use the llq number in the get feature cmd */
17452fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
17462fca2a98SMichal Krawczyk 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
17472fca2a98SMichal Krawczyk 
17485920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
17495920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
17505920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
175101bd6877SRafal Kozik 
17525920d930SMichal Krawczyk 	if (unlikely(max_num_io_queues == 0)) {
17536f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
175401bd6877SRafal Kozik 		return 0; /* unsigned return type; the caller checks for 0 */
175501bd6877SRafal Kozik 	}
175601bd6877SRafal Kozik 
17575920d930SMichal Krawczyk 	return max_num_io_queues;
175801bd6877SRafal Kozik }
175901bd6877SRafal Kozik 
17601173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
17611173fca2SJan Medala {
1762ea93d37eSRafal Kozik 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
17631173fca2SJan Medala 	struct rte_pci_device *pci_dev;
1764eb0ef49dSMichal Krawczyk 	struct rte_intr_handle *intr_handle;
1765890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
17661173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
17671173fca2SJan Medala 	struct ena_com_dev_get_features_ctx get_feat_ctx;
17682fca2a98SMichal Krawczyk 	struct ena_llq_configurations llq_config;
17692fca2a98SMichal Krawczyk 	const char *queue_type_str;
17705920d930SMichal Krawczyk 	uint32_t max_num_io_queues;
1771ea93d37eSRafal Kozik 	int rc;
17721173fca2SJan Medala 	static int adapters_found;
177333dde075SMichal Krawczyk 	bool disable_meta_caching;
17745f267cb0SFerruh Yigit 	bool wd_state = false;
17751173fca2SJan Medala 
17761173fca2SJan Medala 	eth_dev->dev_ops = &ena_dev_ops;
17771173fca2SJan Medala 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
17781173fca2SJan Medala 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
1779b3fc5a1aSKonstantin Ananyev 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
17801173fca2SJan Medala 
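	/* Secondary processes reuse the device state initialized by the
	 * primary, so only the fast-path function pointers set above matter.
	 */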
17811173fca2SJan Medala 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
17821173fca2SJan Medala 		return 0;
17831173fca2SJan Medala 
1784f30e69b4SFerruh Yigit 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1785f30e69b4SFerruh Yigit 
1786fd976890SMichal Krawczyk 	memset(adapter, 0, sizeof(struct ena_adapter));
1787fd976890SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1788fd976890SMichal Krawczyk 
1789fd976890SMichal Krawczyk 	adapter->rte_eth_dev_data = eth_dev->data;
1790fd976890SMichal Krawczyk 	adapter->rte_dev = eth_dev;
1791fd976890SMichal Krawczyk 
1792c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
17931173fca2SJan Medala 	adapter->pdev = pci_dev;
17941173fca2SJan Medala 
1795f2462150SFerruh Yigit 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
17961173fca2SJan Medala 		     pci_dev->addr.domain,
17971173fca2SJan Medala 		     pci_dev->addr.bus,
17981173fca2SJan Medala 		     pci_dev->addr.devid,
17991173fca2SJan Medala 		     pci_dev->addr.function);
18001173fca2SJan Medala 
1801eb0ef49dSMichal Krawczyk 	intr_handle = &pci_dev->intr_handle;
1802eb0ef49dSMichal Krawczyk 
18031173fca2SJan Medala 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
18041173fca2SJan Medala 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
18051173fca2SJan Medala 
18061d339597SRafal Kozik 	if (!adapter->regs) {
1807f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
18081173fca2SJan Medala 			     ENA_REGS_BAR);
18091d339597SRafal Kozik 		return -ENXIO;
18101d339597SRafal Kozik 	}
18111173fca2SJan Medala 
18121173fca2SJan Medala 	ena_dev->reg_bar = adapter->regs;
18131173fca2SJan Medala 	ena_dev->dmadev = adapter->pdev;
18141173fca2SJan Medala 
18151173fca2SJan Medala 	adapter->id_number = adapters_found;
18161173fca2SJan Medala 
18171173fca2SJan Medala 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
18181173fca2SJan Medala 		 adapter->id_number);
18191173fca2SJan Medala 
18208a7a73f2SMichal Krawczyk 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
18218a7a73f2SMichal Krawczyk 	if (rc != 0) {
18228a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to parse devargs");
18238a7a73f2SMichal Krawczyk 		goto err;
18248a7a73f2SMichal Krawczyk 	}
18258a7a73f2SMichal Krawczyk 
18261173fca2SJan Medala 	/* device specific initialization routine */
1827e859d2b8SRafal Kozik 	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
18281173fca2SJan Medala 	if (rc) {
1829f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
1830241da076SRafal Kozik 		goto err;
18311173fca2SJan Medala 	}
1832e859d2b8SRafal Kozik 	adapter->wd_state = wd_state;
18331173fca2SJan Medala 
18348a7a73f2SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
18358a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18362fca2a98SMichal Krawczyk 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
18372fca2a98SMichal Krawczyk 					     &get_feat_ctx.llq, &llq_config);
18382fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
18392fca2a98SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to set placement policy");
18402fca2a98SMichal Krawczyk 		goto err_device_destroy;
18412fca2a98SMichal Krawczyk 	}
18422fca2a98SMichal Krawczyk 
18432fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
18442fca2a98SMichal Krawczyk 		queue_type_str = "Regular";
18452fca2a98SMichal Krawczyk 	else
18462fca2a98SMichal Krawczyk 		queue_type_str = "Low latency";
18476f1c9df9SStephen Hemminger 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
1848ea93d37eSRafal Kozik 
1849ea93d37eSRafal Kozik 	calc_queue_ctx.ena_dev = ena_dev;
1850ea93d37eSRafal Kozik 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
18511173fca2SJan Medala 
18525920d930SMichal Krawczyk 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
18538a7a73f2SMichal Krawczyk 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
18548a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18555920d930SMichal Krawczyk 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
1856241da076SRafal Kozik 		rc = -EFAULT;
1857241da076SRafal Kozik 		goto err_device_destroy;
1858241da076SRafal Kozik 	}
18591173fca2SJan Medala 
18605920d930SMichal Krawczyk 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
18615920d930SMichal Krawczyk 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
1862ea93d37eSRafal Kozik 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
1863ea93d37eSRafal Kozik 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
18645920d930SMichal Krawczyk 	adapter->max_num_io_queues = max_num_io_queues;
18652061fe41SRafal Kozik 
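	/* With LLQ, the device may require Tx meta descriptors to be sent
	 * with every packet instead of being cached; record that capability
	 * so the Tx path can behave accordingly.
	 */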
186633dde075SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
186733dde075SMichal Krawczyk 		disable_meta_caching =
186833dde075SMichal Krawczyk 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
186933dde075SMichal Krawczyk 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
187033dde075SMichal Krawczyk 	} else {
187133dde075SMichal Krawczyk 		disable_meta_caching = false;
187233dde075SMichal Krawczyk 	}
187333dde075SMichal Krawczyk 
18741173fca2SJan Medala 	/* prepare ring structures */
187533dde075SMichal Krawczyk 	ena_init_rings(adapter, disable_meta_caching);
18761173fca2SJan Medala 
1877372c1af5SJan Medala 	ena_config_debug_area(adapter);
1878372c1af5SJan Medala 
18791173fca2SJan Medala 	/* Set max MTU for this device */
18801173fca2SJan Medala 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
18811173fca2SJan Medala 
1882117ba4a6SMichal Krawczyk 	/* set device support for offloads */
1883117ba4a6SMichal Krawczyk 	adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
1884117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
1885117ba4a6SMichal Krawczyk 	adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
1886117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
1887ef538c1aSMichal Krawczyk 	adapter->offloads.rx_csum_supported =
1888117ba4a6SMichal Krawczyk 		(get_feat_ctx.offload.rx_supported &
1889117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
189083277a7cSJakub Palider 
18911173fca2SJan Medala 	/* Copy MAC address and point DPDK to it */
18926d13ea8eSOlivier Matz 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
1893538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)
1894538da7a1SOlivier Matz 			get_feat_ctx.dev_attr.mac_addr,
18956d13ea8eSOlivier Matz 			(struct rte_ether_addr *)adapter->mac_addr);
18961173fca2SJan Medala 
18971173fca2SJan Medala 	adapter->drv_stats = rte_zmalloc("adapter stats",
18981173fca2SJan Medala 					 sizeof(*adapter->drv_stats),
18991173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
19001173fca2SJan Medala 	if (!adapter->drv_stats) {
19016f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n");
1902241da076SRafal Kozik 		rc = -ENOMEM;
1903241da076SRafal Kozik 		goto err_delete_debug_area;
19041173fca2SJan Medala 	}
19051173fca2SJan Medala 
19061343c415SMichal Krawczyk 	rte_spinlock_init(&adapter->admin_lock);
19071343c415SMichal Krawczyk 
1908eb0ef49dSMichal Krawczyk 	rte_intr_callback_register(intr_handle,
1909eb0ef49dSMichal Krawczyk 				   ena_interrupt_handler_rte,
1910eb0ef49dSMichal Krawczyk 				   adapter);
1911eb0ef49dSMichal Krawczyk 	rte_intr_enable(intr_handle);
1912eb0ef49dSMichal Krawczyk 	ena_com_set_admin_polling_mode(ena_dev, false);
1913ca148440SMichal Krawczyk 	ena_com_admin_aenq_enable(ena_dev);
1914eb0ef49dSMichal Krawczyk 
1915d9b8b106SMichal Krawczyk 	if (adapters_found == 0)
1916d9b8b106SMichal Krawczyk 		rte_timer_subsystem_init();
1917d9b8b106SMichal Krawczyk 	rte_timer_init(&adapter->timer_wd);
1918d9b8b106SMichal Krawczyk 
19191173fca2SJan Medala 	adapters_found++;
19201173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_INIT;
19211173fca2SJan Medala 
19221173fca2SJan Medala 	return 0;
1923241da076SRafal Kozik 
1924241da076SRafal Kozik err_delete_debug_area:
1925241da076SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1926241da076SRafal Kozik 
1927241da076SRafal Kozik err_device_destroy:
1928241da076SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1929241da076SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1930241da076SRafal Kozik 
1931241da076SRafal Kozik err:
1932241da076SRafal Kozik 	return rc;
19331173fca2SJan Medala }
19341173fca2SJan Medala 
1935e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev)
1936eb0ef49dSMichal Krawczyk {
1937890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
1938e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1939eb0ef49dSMichal Krawczyk 
1940e457bc70SRafal Kozik 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
1941e457bc70SRafal Kozik 		return;
1942e457bc70SRafal Kozik 
1943e457bc70SRafal Kozik 	ena_com_set_admin_running_state(ena_dev, false);
1944eb0ef49dSMichal Krawczyk 
1945eb0ef49dSMichal Krawczyk 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
1946eb0ef49dSMichal Krawczyk 		ena_close(eth_dev);
1947eb0ef49dSMichal Krawczyk 
1948e457bc70SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1949e457bc70SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1950e457bc70SRafal Kozik 
1951e457bc70SRafal Kozik 	ena_com_abort_admin_commands(ena_dev);
1952e457bc70SRafal Kozik 	ena_com_wait_for_abort_completion(ena_dev);
1953e457bc70SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1954e457bc70SRafal Kozik 	ena_com_mmio_reg_read_request_destroy(ena_dev);
1955e457bc70SRafal Kozik 
1956e457bc70SRafal Kozik 	adapter->state = ENA_ADAPTER_STATE_FREE;
1957e457bc70SRafal Kozik }
1958e457bc70SRafal Kozik 
1959e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
1960e457bc70SRafal Kozik {
1961e457bc70SRafal Kozik 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1962e457bc70SRafal Kozik 		return 0;
1963e457bc70SRafal Kozik 
1964e457bc70SRafal Kozik 	ena_destroy_device(eth_dev);
1965e457bc70SRafal Kozik 
1966eb0ef49dSMichal Krawczyk 	return 0;
1967eb0ef49dSMichal Krawczyk }
1968eb0ef49dSMichal Krawczyk 
19691173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
19701173fca2SJan Medala {
1971890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
19727369f88fSRafal Kozik 
19731173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
19741173fca2SJan Medala 
1975a4996bd8SWei Dai 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
1976a4996bd8SWei Dai 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
19771173fca2SJan Medala 	return 0;
19781173fca2SJan Medala }
19791173fca2SJan Medala 
198033dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
198133dde075SMichal Krawczyk 			   bool disable_meta_caching)
19821173fca2SJan Medala {
19835920d930SMichal Krawczyk 	size_t i;
19841173fca2SJan Medala 
19855920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19861173fca2SJan Medala 		struct ena_ring *ring = &adapter->tx_ring[i];
19871173fca2SJan Medala 
19881173fca2SJan Medala 		ring->configured = 0;
19891173fca2SJan Medala 		ring->type = ENA_RING_TYPE_TX;
19901173fca2SJan Medala 		ring->adapter = adapter;
19911173fca2SJan Medala 		ring->id = i;
19921173fca2SJan Medala 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
19931173fca2SJan Medala 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
19942061fe41SRafal Kozik 		ring->sgl_size = adapter->max_tx_sgl_size;
199533dde075SMichal Krawczyk 		ring->disable_meta_caching = disable_meta_caching;
19961173fca2SJan Medala 	}
19971173fca2SJan Medala 
19985920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19991173fca2SJan Medala 		struct ena_ring *ring = &adapter->rx_ring[i];
20001173fca2SJan Medala 
20011173fca2SJan Medala 		ring->configured = 0;
20021173fca2SJan Medala 		ring->type = ENA_RING_TYPE_RX;
20031173fca2SJan Medala 		ring->adapter = adapter;
20041173fca2SJan Medala 		ring->id = i;
2005ea93d37eSRafal Kozik 		ring->sgl_size = adapter->max_rx_sgl_size;
20061173fca2SJan Medala 	}
20071173fca2SJan Medala }
20081173fca2SJan Medala 
2009bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
20101173fca2SJan Medala 			  struct rte_eth_dev_info *dev_info)
20111173fca2SJan Medala {
20121173fca2SJan Medala 	struct ena_adapter *adapter;
20131173fca2SJan Medala 	struct ena_com_dev *ena_dev;
201456b8b9b7SRafal Kozik 	uint64_t rx_feat = 0, tx_feat = 0;
20151173fca2SJan Medala 
2016498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2017498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2018890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
20191173fca2SJan Medala 
20201173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
2021498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
20221173fca2SJan Medala 
2023e274f573SMarc Sune 	dev_info->speed_capa =
2024e274f573SMarc Sune 			ETH_LINK_SPEED_1G   |
2025e274f573SMarc Sune 			ETH_LINK_SPEED_2_5G |
2026e274f573SMarc Sune 			ETH_LINK_SPEED_5G   |
2027e274f573SMarc Sune 			ETH_LINK_SPEED_10G  |
2028e274f573SMarc Sune 			ETH_LINK_SPEED_25G  |
2029e274f573SMarc Sune 			ETH_LINK_SPEED_40G  |
2030b2feed01SThomas Monjalon 			ETH_LINK_SPEED_50G  |
2031b2feed01SThomas Monjalon 			ETH_LINK_SPEED_100G;
2032e274f573SMarc Sune 
20331173fca2SJan Medala 	/* Set Tx & Rx features available for device */
2034117ba4a6SMichal Krawczyk 	if (adapter->offloads.tso4_supported)
20351173fca2SJan Medala 		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
20361173fca2SJan Medala 
2037117ba4a6SMichal Krawczyk 	if (adapter->offloads.tx_csum_supported)
20381173fca2SJan Medala 		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
20391173fca2SJan Medala 			DEV_TX_OFFLOAD_UDP_CKSUM |
20401173fca2SJan Medala 			DEV_TX_OFFLOAD_TCP_CKSUM;
20411173fca2SJan Medala 
2042117ba4a6SMichal Krawczyk 	if (adapter->offloads.rx_csum_supported)
20431173fca2SJan Medala 		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
20441173fca2SJan Medala 			DEV_RX_OFFLOAD_UDP_CKSUM  |
20451173fca2SJan Medala 			DEV_RX_OFFLOAD_TCP_CKSUM;
20461173fca2SJan Medala 
2047a0a4ff40SRafal Kozik 	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2048a0a4ff40SRafal Kozik 
20491173fca2SJan Medala 	/* Inform framework about available features */
20501173fca2SJan Medala 	dev_info->rx_offload_capa = rx_feat;
20517369f88fSRafal Kozik 	dev_info->rx_queue_offload_capa = rx_feat;
20521173fca2SJan Medala 	dev_info->tx_offload_capa = tx_feat;
205356b8b9b7SRafal Kozik 	dev_info->tx_queue_offload_capa = tx_feat;
20541173fca2SJan Medala 
2055b01ead20SRafal Kozik 	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
2056b01ead20SRafal Kozik 					   ETH_RSS_UDP;
2057b01ead20SRafal Kozik 
20581173fca2SJan Medala 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
20591173fca2SJan Medala 	dev_info->max_rx_pktlen  = adapter->max_mtu;
20601173fca2SJan Medala 	dev_info->max_mac_addrs = 1;
20611173fca2SJan Medala 
20625920d930SMichal Krawczyk 	dev_info->max_rx_queues = adapter->max_num_io_queues;
20635920d930SMichal Krawczyk 	dev_info->max_tx_queues = adapter->max_num_io_queues;
20641173fca2SJan Medala 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
206556b8b9b7SRafal Kozik 
206656b8b9b7SRafal Kozik 	adapter->tx_supported_offloads = tx_feat;
20677369f88fSRafal Kozik 	adapter->rx_supported_offloads = rx_feat;
206892680dc2SRafal Kozik 
20695920d930SMichal Krawczyk 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
207092680dc2SRafal Kozik 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2071ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2072ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
2073ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2074ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
207592680dc2SRafal Kozik 
20765920d930SMichal Krawczyk 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
207792680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
207892680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2079ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
208092680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2081ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
2082bdad90d1SIvan Ilchenko 
2083bdad90d1SIvan Ilchenko 	return 0;
20841173fca2SJan Medala }
20851173fca2SJan Medala 
20861be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
20871be097dcSMichal Krawczyk {
20881be097dcSMichal Krawczyk 	mbuf->data_len = len;
20891be097dcSMichal Krawczyk 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
20901be097dcSMichal Krawczyk 	mbuf->refcnt = 1;
20911be097dcSMichal Krawczyk 	mbuf->next = NULL;
20921be097dcSMichal Krawczyk }
20931be097dcSMichal Krawczyk 
20941be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
20951be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
20961be097dcSMichal Krawczyk 				    uint32_t descs,
20971be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
20981be097dcSMichal Krawczyk 				    uint8_t offset)
20991be097dcSMichal Krawczyk {
21001be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
21011be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf_head;
21021be097dcSMichal Krawczyk 	struct ena_rx_buffer *rx_info;
210383fd97b2SMichal Krawczyk 	int rc;
21041be097dcSMichal Krawczyk 	uint16_t ntc, len, req_id, buf = 0;
21051be097dcSMichal Krawczyk 
21061be097dcSMichal Krawczyk 	if (unlikely(descs == 0))
21071be097dcSMichal Krawczyk 		return NULL;
21081be097dcSMichal Krawczyk 
21091be097dcSMichal Krawczyk 	ntc = *next_to_clean;
21101be097dcSMichal Krawczyk 
21111be097dcSMichal Krawczyk 	len = ena_bufs[buf].len;
21121be097dcSMichal Krawczyk 	req_id = ena_bufs[buf].req_id;
21131be097dcSMichal Krawczyk 	if (unlikely(validate_rx_req_id(rx_ring, req_id)))
21141be097dcSMichal Krawczyk 		return NULL;
21151be097dcSMichal Krawczyk 
21161be097dcSMichal Krawczyk 	rx_info = &rx_ring->rx_buffer_info[req_id];
21171be097dcSMichal Krawczyk 
21181be097dcSMichal Krawczyk 	mbuf = rx_info->mbuf;
21191be097dcSMichal Krawczyk 	RTE_ASSERT(mbuf != NULL);
21201be097dcSMichal Krawczyk 
21211be097dcSMichal Krawczyk 	ena_init_rx_mbuf(mbuf, len);
21221be097dcSMichal Krawczyk 
21231be097dcSMichal Krawczyk 	/* Fill the mbuf head with the data specific to the 1st segment. */
21241be097dcSMichal Krawczyk 	mbuf_head = mbuf;
21251be097dcSMichal Krawczyk 	mbuf_head->nb_segs = descs;
21261be097dcSMichal Krawczyk 	mbuf_head->port = rx_ring->port_id;
21271be097dcSMichal Krawczyk 	mbuf_head->pkt_len = len;
21281be097dcSMichal Krawczyk 	mbuf_head->data_off += offset;
21291be097dcSMichal Krawczyk 
21301be097dcSMichal Krawczyk 	rx_info->mbuf = NULL;
2131c0006061SMichal Krawczyk 	rx_ring->empty_rx_reqs[ntc] = req_id;
2132c0006061SMichal Krawczyk 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
21331be097dcSMichal Krawczyk 
21341be097dcSMichal Krawczyk 	while (--descs) {
21351be097dcSMichal Krawczyk 		++buf;
21361be097dcSMichal Krawczyk 		len = ena_bufs[buf].len;
21371be097dcSMichal Krawczyk 		req_id = ena_bufs[buf].req_id;
21381be097dcSMichal Krawczyk 		if (unlikely(validate_rx_req_id(rx_ring, req_id))) {
21391be097dcSMichal Krawczyk 			rte_mbuf_raw_free(mbuf_head);
21401be097dcSMichal Krawczyk 			return NULL;
21411be097dcSMichal Krawczyk 		}
21421be097dcSMichal Krawczyk 
21431be097dcSMichal Krawczyk 		rx_info = &rx_ring->rx_buffer_info[req_id];
21441be097dcSMichal Krawczyk 		RTE_ASSERT(rx_info->mbuf != NULL);
21451be097dcSMichal Krawczyk 
214683fd97b2SMichal Krawczyk 		if (unlikely(len == 0)) {
214783fd97b2SMichal Krawczyk 			/*
214883fd97b2SMichal Krawczyk 			 * Some devices can pass a descriptor with length 0.
214983fd97b2SMichal Krawczyk 			 * To avoid confusion, the PMD simply puts the
215083fd97b2SMichal Krawczyk 			 * descriptor back, as it was never used. This way,
215183fd97b2SMichal Krawczyk 			 * the mbuf allocation is avoided.
215283fd97b2SMichal Krawczyk 			 */
215383fd97b2SMichal Krawczyk 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
215483fd97b2SMichal Krawczyk 				rx_info->mbuf, req_id);
215583fd97b2SMichal Krawczyk 			if (unlikely(rc != 0)) {
215683fd97b2SMichal Krawczyk 				/* Free the mbuf in case of an error. */
215783fd97b2SMichal Krawczyk 				rte_mbuf_raw_free(rx_info->mbuf);
215883fd97b2SMichal Krawczyk 			} else {
215983fd97b2SMichal Krawczyk 				/*
216083fd97b2SMichal Krawczyk 				 * If there was no error, just exit the loop as
216183fd97b2SMichal Krawczyk 				 * 0 length descriptor is always the last one.
216283fd97b2SMichal Krawczyk 				 */
216383fd97b2SMichal Krawczyk 				break;
216483fd97b2SMichal Krawczyk 			}
216583fd97b2SMichal Krawczyk 		} else {
21661be097dcSMichal Krawczyk 			/* Create an mbuf chain. */
21671be097dcSMichal Krawczyk 			mbuf->next = rx_info->mbuf;
21681be097dcSMichal Krawczyk 			mbuf = mbuf->next;
21691be097dcSMichal Krawczyk 
21701be097dcSMichal Krawczyk 			ena_init_rx_mbuf(mbuf, len);
21711be097dcSMichal Krawczyk 			mbuf_head->pkt_len += len;
217283fd97b2SMichal Krawczyk 		}
21731be097dcSMichal Krawczyk 
217483fd97b2SMichal Krawczyk 		/*
217583fd97b2SMichal Krawczyk 		 * Mark the descriptor as depleted and perform necessary
217683fd97b2SMichal Krawczyk 		 * cleanup.
217783fd97b2SMichal Krawczyk 		 * This code will execute in two cases:
217883fd97b2SMichal Krawczyk 		 *  1. Descriptor len was greater than 0 - normal situation.
217983fd97b2SMichal Krawczyk 		 *  2. Descriptor len was 0 and we failed to add the descriptor
218083fd97b2SMichal Krawczyk 		 *     to the device. In that situation, we should try to add
218183fd97b2SMichal Krawczyk 		 *     the mbuf again in the populate routine and mark the
218283fd97b2SMichal Krawczyk 		 *     descriptor as used up by the device.
218383fd97b2SMichal Krawczyk 		 */
21841be097dcSMichal Krawczyk 		rx_info->mbuf = NULL;
2185c0006061SMichal Krawczyk 		rx_ring->empty_rx_reqs[ntc] = req_id;
2186c0006061SMichal Krawczyk 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
21871be097dcSMichal Krawczyk 	}
21881be097dcSMichal Krawczyk 
21891be097dcSMichal Krawczyk 	*next_to_clean = ntc;
21901be097dcSMichal Krawczyk 
21911be097dcSMichal Krawczyk 	return mbuf_head;
21921be097dcSMichal Krawczyk }
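
/*
 * Editorial sketch, not part of the driver: ena_rx_mbuf() above links one
 * mbuf per Rx descriptor into a chain whose head carries pkt_len and nb_segs.
 * This hypothetical helper re-derives pkt_len by walking the chain, a useful
 * invariant check when modifying the assembly logic.
 */
static __rte_unused uint32_t
ena_example_chain_len(const struct rte_mbuf *mbuf_head)
{
	const struct rte_mbuf *seg;
	uint32_t len = 0;

	for (seg = mbuf_head; seg != NULL; seg = seg->next)
		len += seg->data_len;

	/* For a well-formed chain this equals mbuf_head->pkt_len. */
	return len;
}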
21931be097dcSMichal Krawczyk 
21941173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
21951173fca2SJan Medala 				  uint16_t nb_pkts)
21961173fca2SJan Medala {
21971173fca2SJan Medala 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
219877550607SMichal Krawczyk 	unsigned int free_queue_entries;
219977550607SMichal Krawczyk 	unsigned int refill_threshold;
22001173fca2SJan Medala 	uint16_t next_to_clean = rx_ring->next_to_clean;
220174456796SMichal Krawczyk 	uint16_t descs_in_use;
22021be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
22031be097dcSMichal Krawczyk 	uint16_t completed;
22041173fca2SJan Medala 	struct ena_com_rx_ctx ena_rx_ctx;
22051be097dcSMichal Krawczyk 	int i, rc = 0;
22061173fca2SJan Medala 
22071173fca2SJan Medala 	/* Check adapter state */
22081173fca2SJan Medala 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
22096f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
22101173fca2SJan Medala 			"Trying to receive pkts while device is NOT running\n");
22111173fca2SJan Medala 		return 0;
22121173fca2SJan Medala 	}
22131173fca2SJan Medala 
2214c0006061SMichal Krawczyk 	descs_in_use = rx_ring->ring_size -
221574456796SMichal Krawczyk 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
221674456796SMichal Krawczyk 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
22171173fca2SJan Medala 
22181173fca2SJan Medala 	for (completed = 0; completed < nb_pkts; completed++) {
2219ea93d37eSRafal Kozik 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
22201173fca2SJan Medala 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
22211173fca2SJan Medala 		ena_rx_ctx.descs = 0;
22227b3a3c4bSMaciej Bielski 		ena_rx_ctx.pkt_offset = 0;
22231173fca2SJan Medala 		/* receive packet context */
22241173fca2SJan Medala 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
22251173fca2SJan Medala 				    rx_ring->ena_com_io_sq,
22261173fca2SJan Medala 				    &ena_rx_ctx);
22271173fca2SJan Medala 		if (unlikely(rc)) {
22286f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc);
22299b260dbfSRafal Kozik 			rx_ring->adapter->reset_reason =
22309b260dbfSRafal Kozik 				ENA_REGS_RESET_TOO_MANY_RX_DESCS;
2231241da076SRafal Kozik 			rx_ring->adapter->trigger_reset = true;
22327830e905SSolganik Alexander 			++rx_ring->rx_stats.bad_desc_num;
22331173fca2SJan Medala 			return 0;
22341173fca2SJan Medala 		}
22351173fca2SJan Medala 
22361be097dcSMichal Krawczyk 		mbuf = ena_rx_mbuf(rx_ring,
22371be097dcSMichal Krawczyk 			ena_rx_ctx.ena_bufs,
22381be097dcSMichal Krawczyk 			ena_rx_ctx.descs,
22391be097dcSMichal Krawczyk 			&next_to_clean,
22401be097dcSMichal Krawczyk 			ena_rx_ctx.pkt_offset);
22411be097dcSMichal Krawczyk 		if (unlikely(mbuf == NULL)) {
22421be097dcSMichal Krawczyk 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2243c0006061SMichal Krawczyk 				rx_ring->empty_rx_reqs[next_to_clean] =
22441be097dcSMichal Krawczyk 					rx_ring->ena_bufs[i].req_id;
2245c0006061SMichal Krawczyk 				next_to_clean = ENA_IDX_NEXT_MASKED(
2246c0006061SMichal Krawczyk 					next_to_clean, rx_ring->size_mask);
22471173fca2SJan Medala 			}
2248f00930d9SRafal Kozik 			break;
22491be097dcSMichal Krawczyk 		}
22501173fca2SJan Medala 
22511173fca2SJan Medala 		/* fill mbuf attributes if any */
22521be097dcSMichal Krawczyk 		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx);
22537830e905SSolganik Alexander 
22541be097dcSMichal Krawczyk 		if (unlikely(mbuf->ol_flags &
2255ef74b5f7SMichal Krawczyk 				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
2256ef74b5f7SMichal Krawczyk 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
22577830e905SSolganik Alexander 			++rx_ring->rx_stats.bad_csum;
2258ef74b5f7SMichal Krawczyk 		}
22597830e905SSolganik Alexander 
22601be097dcSMichal Krawczyk 		mbuf->hash.rss = ena_rx_ctx.hash;
22611173fca2SJan Medala 
22621be097dcSMichal Krawczyk 		rx_pkts[completed] = mbuf;
22631be097dcSMichal Krawczyk 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
22641173fca2SJan Medala 	}
22651173fca2SJan Medala 
22661be097dcSMichal Krawczyk 	rx_ring->rx_stats.cnt += completed;
2267ec78af6bSMichal Krawczyk 	rx_ring->next_to_clean = next_to_clean;
2268ec78af6bSMichal Krawczyk 
226977550607SMichal Krawczyk 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
227077550607SMichal Krawczyk 	refill_threshold =
2271c0006061SMichal Krawczyk 		RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
227277550607SMichal Krawczyk 		(unsigned int)ENA_REFILL_THRESH_PACKET);
227377550607SMichal Krawczyk 
22741173fca2SJan Medala 	/* Burst refill to save doorbells, memory barriers, const interval */
227577550607SMichal Krawczyk 	if (free_queue_entries > refill_threshold) {
2276a45462c5SRafal Kozik 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
227777550607SMichal Krawczyk 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2278a45462c5SRafal Kozik 	}
22791173fca2SJan Medala 
22801be097dcSMichal Krawczyk 	return completed;
22811173fca2SJan Medala }
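
/*
 * Editorial sketch, not part of the driver: eth_ena_recv_pkts() is installed
 * as the Rx burst handler, so applications reach it through the generic
 * rte_eth_rx_burst() API. Port/queue ids and the burst size are hypothetical
 * example values; a real caller would process the packets instead of freeing.
 */
static __rte_unused uint16_t
ena_example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);

	return nb_rx;
}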
22821173fca2SJan Medala 
2283b3fc5a1aSKonstantin Ananyev static uint16_t
228483277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2285b3fc5a1aSKonstantin Ananyev 		uint16_t nb_pkts)
2286b3fc5a1aSKonstantin Ananyev {
2287b3fc5a1aSKonstantin Ananyev 	int32_t ret;
2288b3fc5a1aSKonstantin Ananyev 	uint32_t i;
2289b3fc5a1aSKonstantin Ananyev 	struct rte_mbuf *m;
229083277a7cSJakub Palider 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2291a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ip_hdr;
2292b3fc5a1aSKonstantin Ananyev 	uint64_t ol_flags;
229383277a7cSJakub Palider 	uint16_t frag_field;
229483277a7cSJakub Palider 
2295b3fc5a1aSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
2296b3fc5a1aSKonstantin Ananyev 		m = tx_pkts[i];
2297b3fc5a1aSKonstantin Ananyev 		ol_flags = m->ol_flags;
2298b3fc5a1aSKonstantin Ananyev 
2299bc5ef57dSMichal Krawczyk 		if (!(ol_flags & PKT_TX_IPV4))
2300bc5ef57dSMichal Krawczyk 			continue;
2301bc5ef57dSMichal Krawczyk 
2302bc5ef57dSMichal Krawczyk 		/* If the L2 header length was not specified, assume it is the
2303bc5ef57dSMichal Krawczyk 		 * length of the Ethernet header.
2304bc5ef57dSMichal Krawczyk 		 */
2305bc5ef57dSMichal Krawczyk 		if (unlikely(m->l2_len == 0))
23066d13ea8eSOlivier Matz 			m->l2_len = sizeof(struct rte_ether_hdr);
2307bc5ef57dSMichal Krawczyk 
2308a7c528e5SOlivier Matz 		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2309bc5ef57dSMichal Krawczyk 						 m->l2_len);
2310bc5ef57dSMichal Krawczyk 		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2311bc5ef57dSMichal Krawczyk 
231224ac604eSOlivier Matz 		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
2313bc5ef57dSMichal Krawczyk 			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2314bc5ef57dSMichal Krawczyk 
2315bc5ef57dSMichal Krawczyk 			/* If IPv4 header has DF flag enabled and TSO support is
2316bc5ef57dSMichal Krawczyk 			 * disabled, partial checksum should not be calculated.
2317bc5ef57dSMichal Krawczyk 			 */
2318117ba4a6SMichal Krawczyk 			if (!tx_ring->adapter->offloads.tso4_supported)
2319bc5ef57dSMichal Krawczyk 				continue;
2320bc5ef57dSMichal Krawczyk 		}
2321bc5ef57dSMichal Krawczyk 
2322b3fc5a1aSKonstantin Ananyev 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
2323b3fc5a1aSKonstantin Ananyev 				(ol_flags & PKT_TX_L4_MASK) ==
2324b3fc5a1aSKonstantin Ananyev 				PKT_TX_SCTP_CKSUM) {
2325baeed5f4SMichal Krawczyk 			rte_errno = ENOTSUP;
2326b3fc5a1aSKonstantin Ananyev 			return i;
2327b3fc5a1aSKonstantin Ananyev 		}
2328b3fc5a1aSKonstantin Ananyev 
2329b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2330b3fc5a1aSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
2331b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2332baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2333b3fc5a1aSKonstantin Ananyev 			return i;
2334b3fc5a1aSKonstantin Ananyev 		}
2335b3fc5a1aSKonstantin Ananyev #endif
233683277a7cSJakub Palider 
233783277a7cSJakub Palider 		/* In case we are supposed to TSO and have DF not set (DF=0),
233883277a7cSJakub Palider 		 * the hardware must be provided with a partial checksum;
233983277a7cSJakub Palider 		 * otherwise, it will take care of the necessary calculations.
234083277a7cSJakub Palider 		 */
234183277a7cSJakub Palider 
2342b3fc5a1aSKonstantin Ananyev 		ret = rte_net_intel_cksum_flags_prepare(m,
2343b3fc5a1aSKonstantin Ananyev 			ol_flags & ~PKT_TX_TCP_SEG);
2344b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2345baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2346b3fc5a1aSKonstantin Ananyev 			return i;
2347b3fc5a1aSKonstantin Ananyev 		}
2348b3fc5a1aSKonstantin Ananyev 	}
2349b3fc5a1aSKonstantin Ananyev 
2350b3fc5a1aSKonstantin Ananyev 	return i;
2351b3fc5a1aSKonstantin Ananyev }
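
/*
 * Editorial sketch, not part of the driver: eth_ena_prep_pkts() backs the
 * generic rte_eth_tx_prepare() API. A typical caller runs it right before
 * rte_eth_tx_burst() and treats a short count as an offload validation
 * failure reported through rte_errno. Identifiers are hypothetical examples.
 */
static __rte_unused uint16_t
ena_example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
			     struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	/* Packets beyond nb_prep failed validation; rte_errno tells why. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}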
2352b3fc5a1aSKonstantin Ananyev 
2353f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter,
2354f01f060cSRafal Kozik 			     struct ena_admin_ena_hw_hints *hints)
2355f01f060cSRafal Kozik {
2356f01f060cSRafal Kozik 	if (hints->admin_completion_tx_timeout)
2357f01f060cSRafal Kozik 		adapter->ena_dev.admin_queue.completion_timeout =
2358f01f060cSRafal Kozik 			hints->admin_completion_tx_timeout * 1000;
2359f01f060cSRafal Kozik 
2360f01f060cSRafal Kozik 	if (hints->mmio_read_timeout)
2361f01f060cSRafal Kozik 		/* convert to usec */
2362f01f060cSRafal Kozik 		adapter->ena_dev.mmio_read.reg_read_to =
2363f01f060cSRafal Kozik 			hints->mmio_read_timeout * 1000;
2364d9b8b106SMichal Krawczyk 
2365d9b8b106SMichal Krawczyk 	if (hints->driver_watchdog_timeout) {
2366d9b8b106SMichal Krawczyk 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2367d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2368d9b8b106SMichal Krawczyk 		else
2369d9b8b106SMichal Krawczyk 			/* Convert msecs to ticks */
2370d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout =
2371d9b8b106SMichal Krawczyk 				(hints->driver_watchdog_timeout *
2372d9b8b106SMichal Krawczyk 				rte_get_timer_hz()) / 1000;
2373d9b8b106SMichal Krawczyk 	}
2374f01f060cSRafal Kozik }
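
/*
 * Editorial note: worked example of the conversion above, which computes
 * msecs * rte_get_timer_hz() / 1000. With a 2.4 GHz timer and a 3000 ms
 * watchdog hint, keep_alive_timeout becomes 3000 * 2400000000 / 1000 =
 * 7.2e9 ticks. The helper below is a hypothetical sketch of the same math.
 */
static __rte_unused uint64_t
ena_example_msecs_to_ticks(uint64_t msecs)
{
	return msecs * rte_get_timer_hz() / 1000;
}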
2375f01f060cSRafal Kozik 
23762061fe41SRafal Kozik static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
23772061fe41SRafal Kozik 					struct rte_mbuf *mbuf)
23782061fe41SRafal Kozik {
23792fca2a98SMichal Krawczyk 	struct ena_com_dev *ena_dev;
23802fca2a98SMichal Krawczyk 	int num_segments, header_len, rc;
23812061fe41SRafal Kozik 
23822fca2a98SMichal Krawczyk 	ena_dev = &tx_ring->adapter->ena_dev;
23832061fe41SRafal Kozik 	num_segments = mbuf->nb_segs;
23842fca2a98SMichal Krawczyk 	header_len = mbuf->data_len;
23852061fe41SRafal Kozik 
23862061fe41SRafal Kozik 	if (likely(num_segments < tx_ring->sgl_size))
23872061fe41SRafal Kozik 		return 0;
23882061fe41SRafal Kozik 
23892fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
23902fca2a98SMichal Krawczyk 	    (num_segments == tx_ring->sgl_size) &&
23912fca2a98SMichal Krawczyk 	    (header_len < tx_ring->tx_max_header_size))
23922fca2a98SMichal Krawczyk 		return 0;
23932fca2a98SMichal Krawczyk 
23947830e905SSolganik Alexander 	++tx_ring->tx_stats.linearize;
23952061fe41SRafal Kozik 	rc = rte_pktmbuf_linearize(mbuf);
23967830e905SSolganik Alexander 	if (unlikely(rc)) {
23976f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
23987830e905SSolganik Alexander 		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
23997830e905SSolganik Alexander 		++tx_ring->tx_stats.linearize_failed;
24007830e905SSolganik Alexander 		return rc;
24017830e905SSolganik Alexander 	}
24022061fe41SRafal Kozik 
24032061fe41SRafal Kozik 	return rc;
24042061fe41SRafal Kozik }
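
/*
 * Editorial sketch, not part of the driver: rte_pktmbuf_linearize() coalesces
 * a chained mbuf into its first segment and fails (returning -1) when that
 * segment lacks the tailroom for the whole packet, which is why the code
 * above counts linearize_failed separately. Hypothetical example helper.
 */
static __rte_unused int
ena_example_force_single_seg(struct rte_mbuf *mbuf)
{
	if (mbuf->nb_segs == 1)
		return 0;

	return rte_pktmbuf_linearize(mbuf);
}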
24052061fe41SRafal Kozik 
240636278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
240736278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info,
240836278b82SMichal Krawczyk 	struct rte_mbuf *mbuf,
240936278b82SMichal Krawczyk 	void **push_header,
241036278b82SMichal Krawczyk 	uint16_t *header_len)
241136278b82SMichal Krawczyk {
241236278b82SMichal Krawczyk 	struct ena_com_buf *ena_buf;
241336278b82SMichal Krawczyk 	uint16_t delta, seg_len, push_len;
241436278b82SMichal Krawczyk 
241536278b82SMichal Krawczyk 	delta = 0;
241636278b82SMichal Krawczyk 	seg_len = mbuf->data_len;
241736278b82SMichal Krawczyk 
241836278b82SMichal Krawczyk 	tx_info->mbuf = mbuf;
241936278b82SMichal Krawczyk 	ena_buf = tx_info->bufs;
242036278b82SMichal Krawczyk 
242136278b82SMichal Krawczyk 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
242236278b82SMichal Krawczyk 		/*
242336278b82SMichal Krawczyk 		 * Tx header might be (and will be in most cases) smaller than
242436278b82SMichal Krawczyk 		 * tx_max_header_size. But it's not an issue to send more data
242536278b82SMichal Krawczyk 		 * to the device than is actually needed, if the mbuf size is
242636278b82SMichal Krawczyk 		 * greater than tx_max_header_size.
242736278b82SMichal Krawczyk 		 */
242836278b82SMichal Krawczyk 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
242936278b82SMichal Krawczyk 		*header_len = push_len;
243036278b82SMichal Krawczyk 
243136278b82SMichal Krawczyk 		if (likely(push_len <= seg_len)) {
243236278b82SMichal Krawczyk 			/* If the push header is in the single segment, then
243336278b82SMichal Krawczyk 			 * just point it to the 1st mbuf data.
243436278b82SMichal Krawczyk 			 */
243536278b82SMichal Krawczyk 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
243636278b82SMichal Krawczyk 		} else {
243736278b82SMichal Krawczyk 			/* If the push header spans several segments, copy
243836278b82SMichal Krawczyk 			 * it to the intermediate buffer.
243936278b82SMichal Krawczyk 			 */
244036278b82SMichal Krawczyk 			rte_pktmbuf_read(mbuf, 0, push_len,
244136278b82SMichal Krawczyk 				tx_ring->push_buf_intermediate_buf);
244236278b82SMichal Krawczyk 			*push_header = tx_ring->push_buf_intermediate_buf;
244336278b82SMichal Krawczyk 			delta = push_len - seg_len;
244436278b82SMichal Krawczyk 		}
244536278b82SMichal Krawczyk 	} else {
244636278b82SMichal Krawczyk 		*push_header = NULL;
244736278b82SMichal Krawczyk 		*header_len = 0;
244836278b82SMichal Krawczyk 		push_len = 0;
244936278b82SMichal Krawczyk 	}
245036278b82SMichal Krawczyk 
245136278b82SMichal Krawczyk 	/* Process first segment taking into consideration pushed header */
245236278b82SMichal Krawczyk 	if (seg_len > push_len) {
245336278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova +
245436278b82SMichal Krawczyk 				mbuf->data_off +
245536278b82SMichal Krawczyk 				push_len;
245636278b82SMichal Krawczyk 		ena_buf->len = seg_len - push_len;
245736278b82SMichal Krawczyk 		ena_buf++;
245836278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
245936278b82SMichal Krawczyk 	}
246036278b82SMichal Krawczyk 
246136278b82SMichal Krawczyk 	while ((mbuf = mbuf->next) != NULL) {
246236278b82SMichal Krawczyk 		seg_len = mbuf->data_len;
246336278b82SMichal Krawczyk 
246436278b82SMichal Krawczyk 		/* Skip mbufs if whole data is pushed as a header */
246536278b82SMichal Krawczyk 		if (unlikely(delta > seg_len)) {
246636278b82SMichal Krawczyk 			delta -= seg_len;
246736278b82SMichal Krawczyk 			continue;
246836278b82SMichal Krawczyk 		}
246936278b82SMichal Krawczyk 
247036278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
247136278b82SMichal Krawczyk 		ena_buf->len = seg_len - delta;
247236278b82SMichal Krawczyk 		ena_buf++;
247336278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
247436278b82SMichal Krawczyk 
247536278b82SMichal Krawczyk 		delta = 0;
247636278b82SMichal Krawczyk 	}
247736278b82SMichal Krawczyk }
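
/*
 * Editorial note: worked example of the push-header split above. Assume
 * tx_max_header_size = 96 and a 300-byte packet whose first segment holds
 * 64 bytes: push_len = RTE_MIN(300, 96) = 96 > seg_len, so the header is
 * copied through the intermediate buffer and delta = 96 - 64 = 32. The
 * second segment then starts its descriptor at data_off + 32 with
 * len = seg_len - 32, since its first 32 bytes were already pushed.
 */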
247836278b82SMichal Krawczyk 
247936278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
248036278b82SMichal Krawczyk {
248136278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info;
248236278b82SMichal Krawczyk 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
248336278b82SMichal Krawczyk 	uint16_t next_to_use;
248436278b82SMichal Krawczyk 	uint16_t header_len;
248536278b82SMichal Krawczyk 	uint16_t req_id;
248636278b82SMichal Krawczyk 	void *push_header;
248736278b82SMichal Krawczyk 	int nb_hw_desc;
248836278b82SMichal Krawczyk 	int rc;
248936278b82SMichal Krawczyk 
249036278b82SMichal Krawczyk 	rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
249136278b82SMichal Krawczyk 	if (unlikely(rc))
249236278b82SMichal Krawczyk 		return rc;
249336278b82SMichal Krawczyk 
249436278b82SMichal Krawczyk 	next_to_use = tx_ring->next_to_use;
249536278b82SMichal Krawczyk 
249636278b82SMichal Krawczyk 	req_id = tx_ring->empty_tx_reqs[next_to_use];
249736278b82SMichal Krawczyk 	tx_info = &tx_ring->tx_buffer_info[req_id];
249836278b82SMichal Krawczyk 	tx_info->num_of_bufs = 0;
249936278b82SMichal Krawczyk 
250036278b82SMichal Krawczyk 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
250136278b82SMichal Krawczyk 
250236278b82SMichal Krawczyk 	ena_tx_ctx.ena_bufs = tx_info->bufs;
250336278b82SMichal Krawczyk 	ena_tx_ctx.push_header = push_header;
250436278b82SMichal Krawczyk 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
250536278b82SMichal Krawczyk 	ena_tx_ctx.req_id = req_id;
250636278b82SMichal Krawczyk 	ena_tx_ctx.header_len = header_len;
250736278b82SMichal Krawczyk 
250836278b82SMichal Krawczyk 	/* Set Tx offloads flags, if applicable */
250936278b82SMichal Krawczyk 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
251036278b82SMichal Krawczyk 		tx_ring->disable_meta_caching);
251136278b82SMichal Krawczyk 
251236278b82SMichal Krawczyk 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
251336278b82SMichal Krawczyk 			&ena_tx_ctx))) {
251436278b82SMichal Krawczyk 		PMD_DRV_LOG(DEBUG,
251536278b82SMichal Krawczyk 			"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
251636278b82SMichal Krawczyk 			tx_ring->id);
251736278b82SMichal Krawczyk 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
251836278b82SMichal Krawczyk 	}
251936278b82SMichal Krawczyk 
252036278b82SMichal Krawczyk 	/* prepare the packet's descriptors to dma engine */
252136278b82SMichal Krawczyk 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,	&ena_tx_ctx,
252236278b82SMichal Krawczyk 		&nb_hw_desc);
252336278b82SMichal Krawczyk 	if (unlikely(rc)) {
252436278b82SMichal Krawczyk 		++tx_ring->tx_stats.prepare_ctx_err;
252536278b82SMichal Krawczyk 		return rc;
252636278b82SMichal Krawczyk 	}
252736278b82SMichal Krawczyk 
252836278b82SMichal Krawczyk 	tx_info->tx_descs = nb_hw_desc;
252936278b82SMichal Krawczyk 
253036278b82SMichal Krawczyk 	tx_ring->tx_stats.cnt++;
253136278b82SMichal Krawczyk 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
253236278b82SMichal Krawczyk 
253336278b82SMichal Krawczyk 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
253436278b82SMichal Krawczyk 		tx_ring->size_mask);
253536278b82SMichal Krawczyk 
253636278b82SMichal Krawczyk 	return 0;
253736278b82SMichal Krawczyk }
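
/*
 * Editorial note: the early doorbell above only fires in LLQ mode, where
 * ena_com_is_doorbell_needed() reports that the queued descriptors reached
 * the device's maximum burst size; flushing them lets ena_com_prepare_tx()
 * keep queueing (see the DEBUG log message for this case).
 */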
253836278b82SMichal Krawczyk 
253936278b82SMichal Krawczyk static void ena_tx_cleanup(struct ena_ring *tx_ring)
254036278b82SMichal Krawczyk {
254136278b82SMichal Krawczyk 	unsigned int cleanup_budget;
254236278b82SMichal Krawczyk 	unsigned int total_tx_descs = 0;
254336278b82SMichal Krawczyk 	uint16_t next_to_clean = tx_ring->next_to_clean;
254436278b82SMichal Krawczyk 
254536278b82SMichal Krawczyk 	cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
254636278b82SMichal Krawczyk 		(unsigned int)ENA_REFILL_THRESH_PACKET);
254736278b82SMichal Krawczyk 
254836278b82SMichal Krawczyk 	while (likely(total_tx_descs < cleanup_budget)) {
254936278b82SMichal Krawczyk 		struct rte_mbuf *mbuf;
255036278b82SMichal Krawczyk 		struct ena_tx_buffer *tx_info;
255136278b82SMichal Krawczyk 		uint16_t req_id;
255236278b82SMichal Krawczyk 
255336278b82SMichal Krawczyk 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
255436278b82SMichal Krawczyk 			break;
255536278b82SMichal Krawczyk 
255636278b82SMichal Krawczyk 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
255736278b82SMichal Krawczyk 			break;
255836278b82SMichal Krawczyk 
255936278b82SMichal Krawczyk 		/* Get Tx info & store how many descs were processed */
256036278b82SMichal Krawczyk 		tx_info = &tx_ring->tx_buffer_info[req_id];
256136278b82SMichal Krawczyk 
256236278b82SMichal Krawczyk 		mbuf = tx_info->mbuf;
256336278b82SMichal Krawczyk 		rte_pktmbuf_free(mbuf);
256436278b82SMichal Krawczyk 
256536278b82SMichal Krawczyk 		tx_info->mbuf = NULL;
256636278b82SMichal Krawczyk 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
256736278b82SMichal Krawczyk 
256836278b82SMichal Krawczyk 		total_tx_descs += tx_info->tx_descs;
256936278b82SMichal Krawczyk 
257036278b82SMichal Krawczyk 		/* Put back descriptor to the ring for reuse */
257136278b82SMichal Krawczyk 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
257236278b82SMichal Krawczyk 			tx_ring->size_mask);
257336278b82SMichal Krawczyk 	}
257436278b82SMichal Krawczyk 
257536278b82SMichal Krawczyk 	if (likely(total_tx_descs > 0)) {
257636278b82SMichal Krawczyk 		/* acknowledge completion of sent packets */
257736278b82SMichal Krawczyk 		tx_ring->next_to_clean = next_to_clean;
257836278b82SMichal Krawczyk 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
257936278b82SMichal Krawczyk 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
258036278b82SMichal Krawczyk 	}
258136278b82SMichal Krawczyk }
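
/*
 * Editorial note: the cleanup budget above bounds how much completion work a
 * single burst call performs. Assuming the upstream threshold values of the
 * time (divider 8, packet cap 256), a 1024-descriptor ring gets a budget of
 * RTE_MIN(1024 / 8, 256) = 128 descriptors per eth_ena_xmit_pkts() call.
 */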
258236278b82SMichal Krawczyk 
25831173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
25841173fca2SJan Medala 				  uint16_t nb_pkts)
25851173fca2SJan Medala {
25861173fca2SJan Medala 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
258774456796SMichal Krawczyk 	uint16_t sent_idx = 0;
25881173fca2SJan Medala 
25891173fca2SJan Medala 	/* Check adapter state */
25901173fca2SJan Medala 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
25916f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
25921173fca2SJan Medala 			"Trying to xmit pkts while device is NOT running\n");
25931173fca2SJan Medala 		return 0;
25941173fca2SJan Medala 	}
25951173fca2SJan Medala 
259674456796SMichal Krawczyk 	nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq),
259774456796SMichal Krawczyk 		nb_pkts);
2598b66b6e72SJakub Palider 
25991173fca2SJan Medala 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
260036278b82SMichal Krawczyk 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
26012061fe41SRafal Kozik 			break;
26022061fe41SRafal Kozik 
260336278b82SMichal Krawczyk 		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
260436278b82SMichal Krawczyk 			tx_ring->size_mask)]);
26052fca2a98SMichal Krawczyk 	}
26062fca2a98SMichal Krawczyk 
26077830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2608b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
26091173fca2SJan Medala 
26105e02e19eSJan Medala 	/* If there are ready packets to be xmitted... */
26115e02e19eSJan Medala 	if (sent_idx > 0) {
26125e02e19eSJan Medala 		/* ...let HW do its best :-) */
26131173fca2SJan Medala 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
261445b6d861SMichal Krawczyk 		tx_ring->tx_stats.doorbells++;
26155e02e19eSJan Medala 	}
26165e02e19eSJan Medala 
261736278b82SMichal Krawczyk 	ena_tx_cleanup(tx_ring);
2618f7d82d24SRafal Kozik 
26197830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2620b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
26217830e905SSolganik Alexander 	tx_ring->tx_stats.tx_poll++;
26227830e905SSolganik Alexander 
26231173fca2SJan Medala 	return sent_idx;
26241173fca2SJan Medala }
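
/*
 * Editorial sketch, not part of the driver: eth_ena_xmit_pkts() is the Tx
 * burst handler behind rte_eth_tx_burst(). It may accept fewer packets than
 * requested when free descriptors run out, so callers commonly resubmit the
 * remainder. Hypothetical helper; a production caller would bound the loop.
 */
static __rte_unused uint16_t
ena_example_tx_all(uint16_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb_pkts - sent);

	return sent;
}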
26251173fca2SJan Medala 
262645718adaSMichal Krawczyk int ena_copy_eni_stats(struct ena_adapter *adapter)
262745718adaSMichal Krawczyk {
262845718adaSMichal Krawczyk 	struct ena_admin_eni_stats admin_eni_stats;
262945718adaSMichal Krawczyk 	int rc;
263045718adaSMichal Krawczyk 
263145718adaSMichal Krawczyk 	rte_spinlock_lock(&adapter->admin_lock);
263245718adaSMichal Krawczyk 	rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats);
263345718adaSMichal Krawczyk 	rte_spinlock_unlock(&adapter->admin_lock);
263445718adaSMichal Krawczyk 	if (rc != 0) {
263545718adaSMichal Krawczyk 		if (rc == ENA_COM_UNSUPPORTED) {
263645718adaSMichal Krawczyk 			PMD_DRV_LOG(DEBUG,
263745718adaSMichal Krawczyk 				"Retrieving ENI metrics is not supported.\n");
263845718adaSMichal Krawczyk 		} else {
263945718adaSMichal Krawczyk 			PMD_DRV_LOG(WARNING,
264045718adaSMichal Krawczyk 				"Failed to get ENI metrics: %d\n", rc);
264145718adaSMichal Krawczyk 		}
264245718adaSMichal Krawczyk 		return rc;
264345718adaSMichal Krawczyk 	}
264445718adaSMichal Krawczyk 
264545718adaSMichal Krawczyk 	rte_memcpy(&adapter->eni_stats, &admin_eni_stats,
264645718adaSMichal Krawczyk 		sizeof(struct ena_stats_eni));
264745718adaSMichal Krawczyk 
264845718adaSMichal Krawczyk 	return 0;
264945718adaSMichal Krawczyk }
265045718adaSMichal Krawczyk 
26517830e905SSolganik Alexander /**
26527830e905SSolganik Alexander  * DPDK callback to retrieve names of extended device statistics
26537830e905SSolganik Alexander  *
26547830e905SSolganik Alexander  * @param dev
26557830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
26567830e905SSolganik Alexander  * @param[out] xstats_names
26577830e905SSolganik Alexander  *   Buffer to insert names into.
26587830e905SSolganik Alexander  * @param n
26597830e905SSolganik Alexander  *   Number of names.
26607830e905SSolganik Alexander  *
26617830e905SSolganik Alexander  * @return
26627830e905SSolganik Alexander  *   Number of xstats names.
26637830e905SSolganik Alexander  */
26647830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
26657830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
26667830e905SSolganik Alexander 				unsigned int n)
26677830e905SSolganik Alexander {
26687830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
26697830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
26707830e905SSolganik Alexander 
26717830e905SSolganik Alexander 	if (n < xstats_count || !xstats_names)
26727830e905SSolganik Alexander 		return xstats_count;
26737830e905SSolganik Alexander 
26747830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
26757830e905SSolganik Alexander 		strcpy(xstats_names[count].name,
26767830e905SSolganik Alexander 			ena_stats_global_strings[stat].name);
26777830e905SSolganik Alexander 
267845718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++)
267945718adaSMichal Krawczyk 		strcpy(xstats_names[count].name,
268045718adaSMichal Krawczyk 			ena_stats_eni_strings[stat].name);
268145718adaSMichal Krawczyk 
26827830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
26837830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
26847830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
26857830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
26867830e905SSolganik Alexander 				"rx_q%d_%s", i,
26877830e905SSolganik Alexander 				ena_stats_rx_strings[stat].name);
26887830e905SSolganik Alexander 
26897830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
26907830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
26917830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
26927830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
26937830e905SSolganik Alexander 				"tx_q%d_%s", i,
26947830e905SSolganik Alexander 				ena_stats_tx_strings[stat].name);
26957830e905SSolganik Alexander 
26967830e905SSolganik Alexander 	return xstats_count;
26977830e905SSolganik Alexander }
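
/*
 * Editorial sketch, not part of the driver: like other ethdev callbacks, the
 * function above returns the required count when the caller's buffer is too
 * small or NULL, so applications usually size the table in two calls. The
 * fixed-size array and port id below are hypothetical example choices.
 */
static __rte_unused int
ena_example_print_xstat_names(uint16_t port_id)
{
	struct rte_eth_xstat_name names[64];
	int n, i;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0 || n > (int)RTE_DIM(names))
		return n;

	n = rte_eth_xstats_get_names(port_id, names, RTE_DIM(names));
	for (i = 0; i < n; i++)
		PMD_DRV_LOG(INFO, "xstat name: %s\n", names[i].name);

	return n;
}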
26987830e905SSolganik Alexander 
26997830e905SSolganik Alexander /**
27007830e905SSolganik Alexander  * DPDK callback to get extended device statistics.
27017830e905SSolganik Alexander  *
27027830e905SSolganik Alexander  * @param dev
27037830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
27047830e905SSolganik Alexander  * @param[out] stats
27057830e905SSolganik Alexander  *   Stats table output buffer.
27067830e905SSolganik Alexander  * @param n
27077830e905SSolganik Alexander  *   The size of the stats table.
27087830e905SSolganik Alexander  *
27097830e905SSolganik Alexander  * @return
27107830e905SSolganik Alexander  *   Number of xstats on success, negative on failure.
27117830e905SSolganik Alexander  */
27127830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
27137830e905SSolganik Alexander 			  struct rte_eth_xstat *xstats,
27147830e905SSolganik Alexander 			  unsigned int n)
27157830e905SSolganik Alexander {
2716890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
27177830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
27187830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
27197830e905SSolganik Alexander 	int stat_offset;
27207830e905SSolganik Alexander 	void *stats_begin;
27217830e905SSolganik Alexander 
27227830e905SSolganik Alexander 	if (n < xstats_count)
27237830e905SSolganik Alexander 		return xstats_count;
27247830e905SSolganik Alexander 
27257830e905SSolganik Alexander 	if (!xstats)
27267830e905SSolganik Alexander 		return 0;
27277830e905SSolganik Alexander 
27287830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
2729493107fdSMichal Krawczyk 		stat_offset = ena_stats_global_strings[stat].stat_offset;
27307830e905SSolganik Alexander 		stats_begin = &adapter->dev_stats;
27317830e905SSolganik Alexander 
27327830e905SSolganik Alexander 		xstats[count].id = count;
27337830e905SSolganik Alexander 		xstats[count].value = *((uint64_t *)
27347830e905SSolganik Alexander 			((char *)stats_begin + stat_offset));
27357830e905SSolganik Alexander 	}
27367830e905SSolganik Alexander 
273745718adaSMichal Krawczyk 	/* Even if the function below fails, we should copy the previous (or
273845718adaSMichal Krawczyk 	 * initial) values to keep the structure of rte_eth_xstat consistent.
273945718adaSMichal Krawczyk 	 */
274045718adaSMichal Krawczyk 	ena_copy_eni_stats(adapter);
274145718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
274245718adaSMichal Krawczyk 		stat_offset = ena_stats_eni_strings[stat].stat_offset;
274345718adaSMichal Krawczyk 		stats_begin = &adapter->eni_stats;
274445718adaSMichal Krawczyk 
274545718adaSMichal Krawczyk 		xstats[count].id = count;
274645718adaSMichal Krawczyk 		xstats[count].value = *((uint64_t *)
274745718adaSMichal Krawczyk 		    ((char *)stats_begin + stat_offset));
274845718adaSMichal Krawczyk 	}
274945718adaSMichal Krawczyk 
27507830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
27517830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
27527830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
27537830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
27547830e905SSolganik Alexander 
27557830e905SSolganik Alexander 			xstats[count].id = count;
27567830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
27577830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
27587830e905SSolganik Alexander 		}
27597830e905SSolganik Alexander 	}
27607830e905SSolganik Alexander 
27617830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
27627830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
27637830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
27647830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
27657830e905SSolganik Alexander 
27667830e905SSolganik Alexander 			xstats[count].id = count;
27677830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
27687830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
27697830e905SSolganik Alexander 		}
27707830e905SSolganik Alexander 	}
27717830e905SSolganik Alexander 
27727830e905SSolganik Alexander 	return count;
27737830e905SSolganik Alexander }
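
/*
 * Editorial sketch, not part of the driver: values are read back with the
 * matching rte_eth_xstats_get() API; each returned id lines up with the name
 * table produced above. Buffer size and port id are hypothetical examples.
 */
static __rte_unused void
ena_example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat xstats[64];
	int n, i;

	n = rte_eth_xstats_get(port_id, xstats, RTE_DIM(xstats));
	for (i = 0; i < n; i++)
		PMD_DRV_LOG(INFO, "xstat[%llu] = %llu\n",
			(unsigned long long)xstats[i].id,
			(unsigned long long)xstats[i].value);
}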
27747830e905SSolganik Alexander 
27757830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
27767830e905SSolganik Alexander 				const uint64_t *ids,
27777830e905SSolganik Alexander 				uint64_t *values,
27787830e905SSolganik Alexander 				unsigned int n)
27797830e905SSolganik Alexander {
2780890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
27817830e905SSolganik Alexander 	uint64_t id;
27827830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
27837830e905SSolganik Alexander 	unsigned int i;
27847830e905SSolganik Alexander 	int qid;
27857830e905SSolganik Alexander 	int valid = 0;
278645718adaSMichal Krawczyk 	bool was_eni_copied = false;
278745718adaSMichal Krawczyk 
27887830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
27897830e905SSolganik Alexander 		id = ids[i];
27907830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
27917830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
27927830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
27937830e905SSolganik Alexander 			++valid;
27947830e905SSolganik Alexander 			continue;
27957830e905SSolganik Alexander 		}
27967830e905SSolganik Alexander 
279745718adaSMichal Krawczyk 		/* Check if id belongs to ENI statistics */
27987830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
279945718adaSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_ENI) {
280045718adaSMichal Krawczyk 			/* Avoid reading ENI stats multiple times in a single
280145718adaSMichal Krawczyk 			 * function call, as it requires communication with the
280245718adaSMichal Krawczyk 			 * admin queue.
280345718adaSMichal Krawczyk 			 */
280445718adaSMichal Krawczyk 			if (!was_eni_copied) {
280545718adaSMichal Krawczyk 				was_eni_copied = true;
280645718adaSMichal Krawczyk 				ena_copy_eni_stats(adapter);
280745718adaSMichal Krawczyk 			}
280845718adaSMichal Krawczyk 			values[i] = *((uint64_t *)&adapter->eni_stats + id);
280945718adaSMichal Krawczyk 			++valid;
281045718adaSMichal Krawczyk 			continue;
281145718adaSMichal Krawczyk 		}
281245718adaSMichal Krawczyk 
281345718adaSMichal Krawczyk 		/* Check if id belongs to rx queue statistics */
281445718adaSMichal Krawczyk 		id -= ENA_STATS_ARRAY_ENI;
28157830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
28167830e905SSolganik Alexander 		if (id < rx_entries) {
28177830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
28187830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
28197830e905SSolganik Alexander 			values[i] = *((uint64_t *)
28207830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
28217830e905SSolganik Alexander 			++valid;
28227830e905SSolganik Alexander 			continue;
28237830e905SSolganik Alexander 		}
28247830e905SSolganik Alexander 		/* Check if id belongs to tx queue statistics */
28257830e905SSolganik Alexander 		id -= rx_entries;
28267830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
28277830e905SSolganik Alexander 		if (id < tx_entries) {
28287830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
28297830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
28307830e905SSolganik Alexander 			values[i] = *((uint64_t *)
28317830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
28327830e905SSolganik Alexander 			++valid;
28337830e905SSolganik Alexander 			continue;
28347830e905SSolganik Alexander 		}
28357830e905SSolganik Alexander 	}
28367830e905SSolganik Alexander 
28377830e905SSolganik Alexander 	return valid;
28387830e905SSolganik Alexander }
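
/*
 * Editorial note: worked example of the id decoding above. Ids are laid out
 * as [global][eni][rx][tx]. With 2 Rx queues, the first id past the global
 * and ENI blocks maps to Rx stat 0 of queue 0, the next to Rx stat 0 of
 * queue 1; in general qid = id % nb_rx_queues and stat = id / nb_rx_queues,
 * mirroring the modulo/divide pair in the code.
 */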
28397830e905SSolganik Alexander 
28408a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key,
28418a7a73f2SMichal Krawczyk 				   const char *value,
28428a7a73f2SMichal Krawczyk 				   void *opaque)
28438a7a73f2SMichal Krawczyk {
28448a7a73f2SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
28458a7a73f2SMichal Krawczyk 	bool bool_value;
28468a7a73f2SMichal Krawczyk 
28478a7a73f2SMichal Krawczyk 	/* Parse the value. */
28488a7a73f2SMichal Krawczyk 	if (strcmp(value, "1") == 0) {
28498a7a73f2SMichal Krawczyk 		bool_value = true;
28508a7a73f2SMichal Krawczyk 	} else if (strcmp(value, "0") == 0) {
28518a7a73f2SMichal Krawczyk 		bool_value = false;
28528a7a73f2SMichal Krawczyk 	} else {
28538a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR,
28548a7a73f2SMichal Krawczyk 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
28558a7a73f2SMichal Krawczyk 			value, key);
28568a7a73f2SMichal Krawczyk 		return -EINVAL;
28578a7a73f2SMichal Krawczyk 	}
28588a7a73f2SMichal Krawczyk 
28598a7a73f2SMichal Krawczyk 	/* Now, assign it to the proper adapter field. */
28608a7a73f2SMichal Krawczyk 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
28618a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr = bool_value;
28628a7a73f2SMichal Krawczyk 
28638a7a73f2SMichal Krawczyk 	return 0;
28648a7a73f2SMichal Krawczyk }
28658a7a73f2SMichal Krawczyk 
28668a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter,
28678a7a73f2SMichal Krawczyk 			     struct rte_devargs *devargs)
28688a7a73f2SMichal Krawczyk {
28698a7a73f2SMichal Krawczyk 	static const char * const allowed_args[] = {
28708a7a73f2SMichal Krawczyk 		ENA_DEVARG_LARGE_LLQ_HDR,
28718a7a73f2SMichal Krawczyk 	};
28728a7a73f2SMichal Krawczyk 	struct rte_kvargs *kvlist;
28738a7a73f2SMichal Krawczyk 	int rc;
28748a7a73f2SMichal Krawczyk 
28758a7a73f2SMichal Krawczyk 	if (devargs == NULL)
28768a7a73f2SMichal Krawczyk 		return 0;
28778a7a73f2SMichal Krawczyk 
28788a7a73f2SMichal Krawczyk 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
28798a7a73f2SMichal Krawczyk 	if (kvlist == NULL) {
28808a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
28818a7a73f2SMichal Krawczyk 			devargs->args);
28828a7a73f2SMichal Krawczyk 		return -EINVAL;
28838a7a73f2SMichal Krawczyk 	}
28848a7a73f2SMichal Krawczyk 
28858a7a73f2SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
28868a7a73f2SMichal Krawczyk 		ena_process_bool_devarg, adapter);
28878a7a73f2SMichal Krawczyk 
28888a7a73f2SMichal Krawczyk 	rte_kvargs_free(kvlist);
28898a7a73f2SMichal Krawczyk 
28908a7a73f2SMichal Krawczyk 	return rc;
28918a7a73f2SMichal Krawczyk }
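
/*
 * Editorial note: the only key accepted above is ENA_DEVARG_LARGE_LLQ_HDR,
 * parsed by ena_process_bool_devarg() as '0' or '1'. It is supplied with the
 * device on the EAL command line, e.g. (hypothetical PCI address):
 *
 *     dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i
 */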
28928a7a73f2SMichal Krawczyk 
2893ca148440SMichal Krawczyk /*********************************************************************
2894ca148440SMichal Krawczyk  *  PMD configuration
2895ca148440SMichal Krawczyk  *********************************************************************/
2896fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2897fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
2898fdf91e0fSJan Blunck {
2899fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
2900fdf91e0fSJan Blunck 		sizeof(struct ena_adapter), eth_ena_dev_init);
2901fdf91e0fSJan Blunck }
2902fdf91e0fSJan Blunck 
2903fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
2904fdf91e0fSJan Blunck {
2905eb0ef49dSMichal Krawczyk 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
2906fdf91e0fSJan Blunck }
2907fdf91e0fSJan Blunck 
2908fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = {
29091173fca2SJan Medala 	.id_table = pci_id_ena_map,
291005e0eee0SRafal Kozik 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
291105e0eee0SRafal Kozik 		     RTE_PCI_DRV_WC_ACTIVATE,
2912fdf91e0fSJan Blunck 	.probe = eth_ena_pci_probe,
2913fdf91e0fSJan Blunck 	.remove = eth_ena_pci_remove,
29141173fca2SJan Medala };
29151173fca2SJan Medala 
2916fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
291701f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
291806e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
29198a7a73f2SMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");
29209c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_init, pmd.net.ena.init, NOTICE);
29219c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_driver, pmd.net.ena.driver, NOTICE);
29226f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_RX
29239c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_rx, pmd.net.ena.rx, NOTICE);
29246f1c9df9SStephen Hemminger #endif
29256f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX
29269c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_tx, pmd.net.ena.tx, NOTICE);
29276f1c9df9SStephen Hemminger #endif
29286f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
29299c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_tx_free, pmd.net.ena.tx_free, NOTICE);
29306f1c9df9SStephen Hemminger #endif
29316f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_COM_DEBUG
29329c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_com, pmd.net.ena.com, NOTICE);
29336f1c9df9SStephen Hemminger #endif
29343adcba9aSMichal Krawczyk 
29353adcba9aSMichal Krawczyk /******************************************************************************
29363adcba9aSMichal Krawczyk  ******************************** AENQ Handlers *******************************
29373adcba9aSMichal Krawczyk  *****************************************************************************/
2938ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data,
2939ca148440SMichal Krawczyk 				      struct ena_admin_aenq_entry *aenq_e)
2940ca148440SMichal Krawczyk {
2941ca148440SMichal Krawczyk 	struct rte_eth_dev *eth_dev;
2942ca148440SMichal Krawczyk 	struct ena_adapter *adapter;
2943ca148440SMichal Krawczyk 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
2944ca148440SMichal Krawczyk 	uint32_t status;
2945ca148440SMichal Krawczyk 
2946890728ffSStephen Hemminger 	adapter = adapter_data;
2947ca148440SMichal Krawczyk 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
2948ca148440SMichal Krawczyk 	eth_dev = adapter->rte_dev;
2949ca148440SMichal Krawczyk 
2950ca148440SMichal Krawczyk 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
2951ca148440SMichal Krawczyk 	adapter->link_status = status;
2952ca148440SMichal Krawczyk 
2953ca148440SMichal Krawczyk 	ena_link_update(eth_dev, 0);
29545723fbedSFerruh Yigit 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2955ca148440SMichal Krawczyk }
2956ca148440SMichal Krawczyk 
2957f01f060cSRafal Kozik static void ena_notification(void *data,
2958f01f060cSRafal Kozik 			     struct ena_admin_aenq_entry *aenq_e)
2959f01f060cSRafal Kozik {
2960890728ffSStephen Hemminger 	struct ena_adapter *adapter = data;
2961f01f060cSRafal Kozik 	struct ena_admin_ena_hw_hints *hints;
2962f01f060cSRafal Kozik 
2963f01f060cSRafal Kozik 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
29646f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n",
2965f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.group,
2966f01f060cSRafal Kozik 			ENA_ADMIN_NOTIFICATION);
2967f01f060cSRafal Kozik 
2968f01f060cSRafal Kozik 	switch (aenq_e->aenq_common_desc.syndrom) {
2969f01f060cSRafal Kozik 	case ENA_ADMIN_UPDATE_HINTS:
2970f01f060cSRafal Kozik 		hints = (struct ena_admin_ena_hw_hints *)
2971f01f060cSRafal Kozik 			(&aenq_e->inline_data_w4);
2972f01f060cSRafal Kozik 		ena_update_hints(adapter, hints);
2973f01f060cSRafal Kozik 		break;
2974f01f060cSRafal Kozik 	default:
29756f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome %d\n",
2976f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.syndrom);
2977f01f060cSRafal Kozik 	}
2978f01f060cSRafal Kozik }
2979f01f060cSRafal Kozik 
2980d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
2981d9b8b106SMichal Krawczyk 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
2982d9b8b106SMichal Krawczyk {
2983890728ffSStephen Hemminger 	struct ena_adapter *adapter = adapter_data;
298494c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
298594c3e376SRafal Kozik 	uint64_t rx_drops;
2986e1e73e32SMichal Krawczyk 	uint64_t tx_drops;
2987d9b8b106SMichal Krawczyk 
2988d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
298994c3e376SRafal Kozik 
299094c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
299194c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2992e1e73e32SMichal Krawczyk 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2993e1e73e32SMichal Krawczyk 
2994e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = rx_drops;
2995e1e73e32SMichal Krawczyk 	adapter->dev_stats.tx_drops = tx_drops;
2996d9b8b106SMichal Krawczyk }
2997d9b8b106SMichal Krawczyk 
29983adcba9aSMichal Krawczyk /**
29993adcba9aSMichal Krawczyk  * This handler will be called for an unknown event group or unimplemented handlers
30003adcba9aSMichal Krawczyk  **/
30013adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
30023adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
30033adcba9aSMichal Krawczyk {
30046f1c9df9SStephen Hemminger 	PMD_DRV_LOG(ERR, "Unknown event was received or event with "
3005983cce2dSRafal Kozik 			  "unimplemented handler\n");
30063adcba9aSMichal Krawczyk }
30073adcba9aSMichal Krawczyk 
3008ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
30093adcba9aSMichal Krawczyk 	.handlers = {
3010ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3011f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3012d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
30133adcba9aSMichal Krawczyk 	},
30143adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
30153adcba9aSMichal Krawczyk };
3016