/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	2
#define DRV_MODULE_VER_SUBMINOR	1

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)
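/*
 * Example: queue pair 3 uses IO TX queue 6 and IO RX queue 7, and
 * ENA_IO_RXQ_IDX_REV(7) yields the original RX queue index 3.
 */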

#define __MERGE_64B_H_L(h, l) (((uint64_t)(h) << 32) | (l))
#define TEST_BIT(val, bit_shift) ((val) & (1UL << (bit_shift)))
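/*
 * The device reports 64-bit counters as 32-bit high/low pairs (e.g.
 * rx_pkts_high/rx_pkts_low in the basic statistics); __MERGE_64B_H_L()
 * combines such a pair back into a single 64-bit value.
 */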

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
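/*
 * The upper 4 bits of data_off hold the TCP data offset, i.e. the L4
 * header length expressed in 32-bit words.
 */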

#define ENA_RX_RSS_TABLE_LOG_SIZE  7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST             = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}
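/*
 * For example, ENA_STAT_RX_ENTRY(cnt) expands to
 * { .name = "cnt", .stat_offset = offsetof(struct ena_stats_rx, cnt) },
 * letting the xstats helpers read any counter by its byte offset.
 */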

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
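/*
 * Example usage (assuming the PCI allow-list syntax of recent DPDK
 * releases; the exact EAL option may vary by version):
 *   --allow <domain:bus:devid.func>,large_llq_hdr=1
 */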

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and add it to the name.
 */
rte_atomic32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)
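/*
 * A TX offload is performed only when it is both requested in the mbuf
 * (MBUF_OFFLOADS) and enabled on the queue (QUEUE_OFFLOADS); see
 * ena_tx_mbuf_prepare().
 */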

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define	ENA_TX_OFFLOAD_MASK	(\
	PKT_TX_L4_MASK |         \
	PKT_TX_IPV6 |            \
	PKT_TX_IPV4 |            \
	PKT_TX_IP_CKSUM |        \
	PKT_TX_TCP_SEG)

#define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
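/*
 * Flags in the NOTSUP mask cannot be honored by the device; mbufs
 * carrying any of them are expected to be rejected by eth_ena_prep_pkts().
 */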

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static void ena_tx_cleanup(struct ena_ring *tx_ring);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			      enum ena_ring_type ring_type);
static int ena_queue_start(struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static int ena_copy_eni_stats(struct ena_adapter *adapter);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure        = ena_dev_configure,
	.dev_infos_get        = ena_infos_get,
	.rx_queue_setup       = ena_rx_queue_setup,
	.tx_queue_setup       = ena_tx_queue_setup,
	.dev_start            = ena_start,
	.dev_stop             = ena_stop,
	.link_update          = ena_link_update,
	.stats_get            = ena_stats_get,
	.xstats_get_names     = ena_xstats_get_names,
	.xstats_get	      = ena_xstats_get,
	.xstats_get_by_id     = ena_xstats_get_by_id,
	.mtu_set              = ena_mtu_set,
	.rx_queue_release     = ena_rx_queue_release,
	.tx_queue_release     = ena_tx_queue_release,
	.dev_close            = ena_close,
	.dev_reset            = ena_dev_reset,
	.reta_update          = ena_rss_reta_update,
	.reta_query           = ena_rss_reta_query,
};

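/*
 * Generate the RSS hash key once per process and reuse it on every call,
 * so all ENA ports created by this application hash traffic identically.
 */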
void ena_rss_key_fill(void *key, size_t size)
{
	static bool key_generated;
	static uint8_t default_key[ENA_HASH_KEY_SIZE];
	size_t i;

	RTE_ASSERT(size <= ENA_HASH_KEY_SIZE);

	if (!key_generated) {
		for (i = 0; i < ENA_HASH_KEY_SIZE; ++i)
			default_key[i] = rte_rand() & 0xff;
		key_generated = true;
	}

	rte_memcpy(key, default_key, size);
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err))
			ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

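	/*
	 * Report the L4 checksum as unknown when the device did not check it
	 * (e.g. for fragmented packets); otherwise propagate its verdict.
	 */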
	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	else
		if (unlikely(ena_rx_ctx->l4_csum_err))
			ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_UDP_CKSUM) &&
				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

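/*
 * Sanity-check a completed TX request ID: it must be within the ring size
 * and still have an mbuf attached. Any violation is fatal and schedules a
 * device reset.
 */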
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_DRV_LOG(ERR, "tx_info doesn't have a valid mbuf\n");
	else
		PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
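/* E.g. with one Tx and one Rx queue: 4 global + 5 ENI + 9 Tx + 7 Rx = 25 */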
static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
{
	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
		(dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->rte_dev);

	/* Allocate 32 bytes for each string and 64 bits for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     adapter);

	/*
	 * The MAC address is not allocated dynamically; setting it to NULL
	 * prevents rte_eth_dev_release_port() from trying to free it.
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device");

	return rc;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		PMD_DRV_LOG(WARNING,
			"Requested indirection table size (%d) is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		/*
		 * Each reta_conf holds 64 entries; to cover all 128 table
		 * entries, two groups of 64 are used. E.g. entry 70 lives
		 * in reta_conf[1].reta[6].
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       entry_value);
			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
				PMD_DRV_LOG(ERR,
					"Cannot fill indirect table\n");
				return rc;
			}
		}
	}

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_set(ena_dev);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		return rc;
	}

	PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0; i < reta_size; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
			PMD_DRV_LOG(ERR, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_start(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "Failed to start queue %d of type %d",
					     i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
				  "max mtu: %d, min mtu: %d",
			     max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

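	/*
	 * Devices advertising the MAX_QUEUES_EXT feature report their queue
	 * limits in max_queue_ext; older devices use the legacy max_queues
	 * descriptor instead.
	 */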
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

	if (use_large_llq_hdr) {
		if ((llq->entry_size_ctrl_supported &
		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    (ena_dev->tx_mem_queue_type ==
		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
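			/*
			 * 256B LLQ entries double the per-descriptor footprint
			 * in device memory, so the maximum TX queue depth is
			 * halved to stay within the same LLQ budget.
			 */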
			max_tx_queue_size /= 2;
			PMD_INIT_LOG(INFO,
				"Forcing large headers and decreasing maximum TX queue size to %d\n",
				max_tx_queue_size);
		} else {
			PMD_INIT_LOG(ERR,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;

	return 0;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	adapter->drv_stats->rx_drops = 0;
}

static int ena_stats_get(struct rte_eth_dev *dev,
			  struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);

	/* Driver related stats */
	stats->imissed = adapter->drv_stats->rx_drops;
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);

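	/*
	 * Per-queue counters are exposed only for the first
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues in each direction.
	 */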
99345b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
99445b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
99545b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
99645b6d861SMichal Krawczyk 		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;
99745b6d861SMichal Krawczyk 
99845b6d861SMichal Krawczyk 		stats->q_ibytes[i] = rx_stats->bytes;
99945b6d861SMichal Krawczyk 		stats->q_ipackets[i] = rx_stats->cnt;
100045b6d861SMichal Krawczyk 		stats->q_errors[i] = rx_stats->bad_desc_num +
100145b6d861SMichal Krawczyk 			rx_stats->bad_req_id;
100245b6d861SMichal Krawczyk 	}
100345b6d861SMichal Krawczyk 
100445b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
100545b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
100645b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
100745b6d861SMichal Krawczyk 		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;
100845b6d861SMichal Krawczyk 
100945b6d861SMichal Krawczyk 		stats->q_obytes[i] = tx_stats->bytes;
101045b6d861SMichal Krawczyk 		stats->q_opackets[i] = tx_stats->cnt;
101145b6d861SMichal Krawczyk 	}
101245b6d861SMichal Krawczyk 
1013d5b0924bSMatan Azrad 	return 0;
10141173fca2SJan Medala }
10151173fca2SJan Medala 
10161173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
10171173fca2SJan Medala {
10181173fca2SJan Medala 	struct ena_adapter *adapter;
10191173fca2SJan Medala 	struct ena_com_dev *ena_dev;
10201173fca2SJan Medala 	int rc = 0;
10211173fca2SJan Medala 
1022498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
1023498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
1024890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
10251173fca2SJan Medala 
10261173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
1027498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
10281173fca2SJan Medala 
1029241da076SRafal Kozik 	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
10306f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1031241da076SRafal Kozik 			"Invalid MTU setting. new_mtu: %d "
1032241da076SRafal Kozik 			"max mtu: %d min mtu: %d\n",
1033241da076SRafal Kozik 			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
1034241da076SRafal Kozik 		return -EINVAL;
10351173fca2SJan Medala 	}
10361173fca2SJan Medala 
10371173fca2SJan Medala 	rc = ena_com_set_dev_mtu(ena_dev, mtu);
10381173fca2SJan Medala 	if (rc)
10396f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
10401173fca2SJan Medala 	else
10416f1c9df9SStephen Hemminger 		PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu);
10421173fca2SJan Medala 
10431173fca2SJan Medala 	return rc;
10441173fca2SJan Medala }
10451173fca2SJan Medala 
10461173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev)
10471173fca2SJan Medala {
1048890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1049d9b8b106SMichal Krawczyk 	uint64_t ticks;
10501173fca2SJan Medala 	int rc = 0;
10511173fca2SJan Medala 
10521173fca2SJan Medala 	rc = ena_check_valid_conf(adapter);
10531173fca2SJan Medala 	if (rc)
10541173fca2SJan Medala 		return rc;
10551173fca2SJan Medala 
105626e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
10571173fca2SJan Medala 	if (rc)
10581173fca2SJan Medala 		return rc;
10591173fca2SJan Medala 
106026e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
10611173fca2SJan Medala 	if (rc)
106226e5543dSRafal Kozik 		goto err_start_tx;
10631173fca2SJan Medala 
10641173fca2SJan Medala 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
1065361913adSDaria Kolistratova 	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
10661173fca2SJan Medala 		rc = ena_rss_init_default(adapter);
10671173fca2SJan Medala 		if (rc)
106826e5543dSRafal Kozik 			goto err_rss_init;
10691173fca2SJan Medala 	}
10701173fca2SJan Medala 
10711173fca2SJan Medala 	ena_stats_restart(dev);
10721173fca2SJan Medala 
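	/* Initialize the keep-alive timestamp and arm the watchdog timer. The
	 * timer fires once per second on this lcore and checks for missed
	 * keep-alive events and a hung admin queue.
	 */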
1073d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
1074d9b8b106SMichal Krawczyk 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1075d9b8b106SMichal Krawczyk 
1076d9b8b106SMichal Krawczyk 	ticks = rte_get_timer_hz();
1077d9b8b106SMichal Krawczyk 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1078d9b8b106SMichal Krawczyk 			ena_timer_wd_callback, adapter);
1079d9b8b106SMichal Krawczyk 
10807830e905SSolganik Alexander 	++adapter->dev_stats.dev_start;
10811173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
10821173fca2SJan Medala 
10831173fca2SJan Medala 	return 0;
108426e5543dSRafal Kozik 
108526e5543dSRafal Kozik err_rss_init:
108626e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
108726e5543dSRafal Kozik err_start_tx:
108826e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
108926e5543dSRafal Kozik 	return rc;
10901173fca2SJan Medala }
10911173fca2SJan Medala 
109262024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev)
1093eb0ef49dSMichal Krawczyk {
1094890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1095e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1096e457bc70SRafal Kozik 	int rc;
1097eb0ef49dSMichal Krawczyk 
1098d9b8b106SMichal Krawczyk 	rte_timer_stop_sync(&adapter->timer_wd);
109926e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
110026e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1101d9b8b106SMichal Krawczyk 
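	/* If a device reset was requested (e.g. by the watchdog), perform it
	 * now, when all the IO queues are already stopped.
	 */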
1102e457bc70SRafal Kozik 	if (adapter->trigger_reset) {
1103e457bc70SRafal Kozik 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1104e457bc70SRafal Kozik 		if (rc)
11056f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc);
1106e457bc70SRafal Kozik 	}
1107e457bc70SRafal Kozik 
11087830e905SSolganik Alexander 	++adapter->dev_stats.dev_stop;
1109eb0ef49dSMichal Krawczyk 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1110b8f5d2aeSThomas Monjalon 	dev->data->dev_started = 0;
111162024eb8SIvan Ilchenko 
111262024eb8SIvan Ilchenko 	return 0;
1113eb0ef49dSMichal Krawczyk }
1114eb0ef49dSMichal Krawczyk 
1115df238f84SMichal Krawczyk static int ena_create_io_queue(struct ena_ring *ring)
1116df238f84SMichal Krawczyk {
1117df238f84SMichal Krawczyk 	struct ena_adapter *adapter;
1118df238f84SMichal Krawczyk 	struct ena_com_dev *ena_dev;
1119df238f84SMichal Krawczyk 	struct ena_com_create_io_ctx ctx =
1120df238f84SMichal Krawczyk 		/* policy set to _HOST just to satisfy icc compiler */
1121df238f84SMichal Krawczyk 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1122df238f84SMichal Krawczyk 		  0, 0, 0, 0, 0 };
1123df238f84SMichal Krawczyk 	uint16_t ena_qid;
1124778677dcSRafal Kozik 	unsigned int i;
1125df238f84SMichal Krawczyk 	int rc;
1126df238f84SMichal Krawczyk 
1127df238f84SMichal Krawczyk 	adapter = ring->adapter;
1128df238f84SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1129df238f84SMichal Krawczyk 
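	/* Translate the ring id to the device IO queue index and reset the
	 * list of free request ids for this ring.
	 */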
1130df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX) {
1131df238f84SMichal Krawczyk 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1132df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1133df238f84SMichal Krawczyk 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1134778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1135778677dcSRafal Kozik 			ring->empty_tx_reqs[i] = i;
1136df238f84SMichal Krawczyk 	} else {
1137df238f84SMichal Krawczyk 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1138df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1139778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1140778677dcSRafal Kozik 			ring->empty_rx_reqs[i] = i;
1141df238f84SMichal Krawczyk 	}
1142badc3a6aSMichal Krawczyk 	ctx.queue_size = ring->ring_size;
1143df238f84SMichal Krawczyk 	ctx.qid = ena_qid;
1144df238f84SMichal Krawczyk 	ctx.msix_vector = -1; /* interrupts not used */
11454217cb0bSMichal Krawczyk 	ctx.numa_node = ring->numa_socket_id;
1146df238f84SMichal Krawczyk 
1147df238f84SMichal Krawczyk 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1148df238f84SMichal Krawczyk 	if (rc) {
11496f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1150df238f84SMichal Krawczyk 			"failed to create io queue #%d (qid:%d) rc: %d\n",
1151df238f84SMichal Krawczyk 			ring->id, ena_qid, rc);
1152df238f84SMichal Krawczyk 		return rc;
1153df238f84SMichal Krawczyk 	}
1154df238f84SMichal Krawczyk 
1155df238f84SMichal Krawczyk 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1156df238f84SMichal Krawczyk 				     &ring->ena_com_io_sq,
1157df238f84SMichal Krawczyk 				     &ring->ena_com_io_cq);
1158df238f84SMichal Krawczyk 	if (rc) {
11596f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1160df238f84SMichal Krawczyk 			"Failed to get io queue handlers. queue num %d rc: %d\n",
1161df238f84SMichal Krawczyk 			ring->id, rc);
1162df238f84SMichal Krawczyk 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1163df238f84SMichal Krawczyk 		return rc;
1164df238f84SMichal Krawczyk 	}
1165df238f84SMichal Krawczyk 
1166df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX)
1167df238f84SMichal Krawczyk 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1168df238f84SMichal Krawczyk 
1169df238f84SMichal Krawczyk 	return 0;
1170df238f84SMichal Krawczyk }
1171df238f84SMichal Krawczyk 
117226e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring)
1173df238f84SMichal Krawczyk {
117426e5543dSRafal Kozik 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1175df238f84SMichal Krawczyk 
117626e5543dSRafal Kozik 	if (ring->type == ENA_RING_TYPE_RX) {
117726e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
117826e5543dSRafal Kozik 		ena_rx_queue_release_bufs(ring);
117926e5543dSRafal Kozik 	} else {
118026e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
118126e5543dSRafal Kozik 		ena_tx_queue_release_bufs(ring);
1182df238f84SMichal Krawczyk 	}
1183df238f84SMichal Krawczyk }
1184df238f84SMichal Krawczyk 
118526e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev,
118626e5543dSRafal Kozik 			      enum ena_ring_type ring_type)
118726e5543dSRafal Kozik {
1188890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
118926e5543dSRafal Kozik 	struct ena_ring *queues = NULL;
119026e5543dSRafal Kozik 	uint16_t nb_queues, i;
119126e5543dSRafal Kozik 
119226e5543dSRafal Kozik 	if (ring_type == ENA_RING_TYPE_RX) {
119326e5543dSRafal Kozik 		queues = adapter->rx_ring;
119426e5543dSRafal Kozik 		nb_queues = dev->data->nb_rx_queues;
119526e5543dSRafal Kozik 	} else {
119626e5543dSRafal Kozik 		queues = adapter->tx_ring;
119726e5543dSRafal Kozik 		nb_queues = dev->data->nb_tx_queues;
119826e5543dSRafal Kozik 	}
119926e5543dSRafal Kozik 
120026e5543dSRafal Kozik 	for (i = 0; i < nb_queues; ++i)
120126e5543dSRafal Kozik 		if (queues[i].configured)
120226e5543dSRafal Kozik 			ena_queue_stop(&queues[i]);
120326e5543dSRafal Kozik }
120426e5543dSRafal Kozik 
120526e5543dSRafal Kozik static int ena_queue_start(struct ena_ring *ring)
12061173fca2SJan Medala {
1207a467e8f3SMichal Krawczyk 	int rc, bufs_num;
12081173fca2SJan Medala 
12091173fca2SJan Medala 	ena_assert_msg(ring->configured == 1,
121026e5543dSRafal Kozik 		       "Trying to start unconfigured queue\n");
12111173fca2SJan Medala 
1212df238f84SMichal Krawczyk 	rc = ena_create_io_queue(ring);
1213df238f84SMichal Krawczyk 	if (rc) {
1214498c687aSRafal Kozik 		PMD_INIT_LOG(ERR, "Failed to create IO queue!");
1215df238f84SMichal Krawczyk 		return rc;
1216df238f84SMichal Krawczyk 	}
1217df238f84SMichal Krawczyk 
12181173fca2SJan Medala 	ring->next_to_clean = 0;
12191173fca2SJan Medala 	ring->next_to_use = 0;
12201173fca2SJan Medala 
12217830e905SSolganik Alexander 	if (ring->type == ENA_RING_TYPE_TX) {
12227830e905SSolganik Alexander 		ring->tx_stats.available_desc =
1223b2b02edeSMichal Krawczyk 			ena_com_free_q_entries(ring->ena_com_io_sq);
12241173fca2SJan Medala 		return 0;
12257830e905SSolganik Alexander 	}
12261173fca2SJan Medala 
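	/* One descriptor is left unused: the accounting in
	 * ena_populate_rx_queue() treats ring_size - 1 in-flight buffers as a
	 * completely full ring.
	 */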
1227a467e8f3SMichal Krawczyk 	bufs_num = ring->ring_size - 1;
1228a467e8f3SMichal Krawczyk 	rc = ena_populate_rx_queue(ring, bufs_num);
1229a467e8f3SMichal Krawczyk 	if (rc != bufs_num) {
123026e5543dSRafal Kozik 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
123126e5543dSRafal Kozik 					 ENA_IO_RXQ_IDX(ring->id));
1232f2462150SFerruh Yigit 		PMD_INIT_LOG(ERR, "Failed to populate Rx ring!");
1233241da076SRafal Kozik 		return ENA_COM_FAULT;
12341173fca2SJan Medala 	}
12354387e81cSIdo Segev 	/* Flush the per-core RX buffer pool caches, as the buffers can be
12364387e81cSIdo Segev 	 * used on other cores as well.
12374387e81cSIdo Segev 	 */
12384387e81cSIdo Segev 	rte_mempool_cache_flush(NULL, ring->mb_pool);
12391173fca2SJan Medala 
12401173fca2SJan Medala 	return 0;
12411173fca2SJan Medala }
12421173fca2SJan Medala 
12431173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev,
12441173fca2SJan Medala 			      uint16_t queue_idx,
12451173fca2SJan Medala 			      uint16_t nb_desc,
12464217cb0bSMichal Krawczyk 			      unsigned int socket_id,
124756b8b9b7SRafal Kozik 			      const struct rte_eth_txconf *tx_conf)
12481173fca2SJan Medala {
12491173fca2SJan Medala 	struct ena_ring *txq = NULL;
1250890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
12511173fca2SJan Medala 	unsigned int i;
12521173fca2SJan Medala 
12531173fca2SJan Medala 	txq = &adapter->tx_ring[queue_idx];
12541173fca2SJan Medala 
12551173fca2SJan Medala 	if (txq->configured) {
12566f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
12571173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
12581173fca2SJan Medala 			queue_idx);
1259241da076SRafal Kozik 		return ENA_COM_FAULT;
12601173fca2SJan Medala 	}
12611173fca2SJan Medala 
12621daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
12636f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1264498c687aSRafal Kozik 			"Unsupported size of TX queue: %d is not a power of 2.\n",
12651daff526SJakub Palider 			nb_desc);
12661daff526SJakub Palider 		return -EINVAL;
12671daff526SJakub Palider 	}
12681daff526SJakub Palider 
12695920d930SMichal Krawczyk 	if (nb_desc > adapter->max_tx_ring_size) {
12706f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
12711173fca2SJan Medala 			"Unsupported size of TX queue (max size: %d)\n",
12725920d930SMichal Krawczyk 			adapter->max_tx_ring_size);
12731173fca2SJan Medala 		return -EINVAL;
12741173fca2SJan Medala 	}
12751173fca2SJan Medala 
1276ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
12775920d930SMichal Krawczyk 		nb_desc = adapter->max_tx_ring_size;
1278ea93d37eSRafal Kozik 
12791173fca2SJan Medala 	txq->port_id = dev->data->port_id;
12801173fca2SJan Medala 	txq->next_to_clean = 0;
12811173fca2SJan Medala 	txq->next_to_use = 0;
12821173fca2SJan Medala 	txq->ring_size = nb_desc;
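	/* nb_desc was verified above to be a power of 2, so ring indexes can
	 * be wrapped with a simple mask.
	 */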
1283c0006061SMichal Krawczyk 	txq->size_mask = nb_desc - 1;
12844217cb0bSMichal Krawczyk 	txq->numa_socket_id = socket_id;
12851173fca2SJan Medala 
12861173fca2SJan Medala 	txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
12871173fca2SJan Medala 					  sizeof(struct ena_tx_buffer) *
12881173fca2SJan Medala 					  txq->ring_size,
12891173fca2SJan Medala 					  RTE_CACHE_LINE_SIZE);
12901173fca2SJan Medala 	if (!txq->tx_buffer_info) {
12916f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n");
1292df238f84SMichal Krawczyk 		return -ENOMEM;
12931173fca2SJan Medala 	}
12941173fca2SJan Medala 
12951173fca2SJan Medala 	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
12961173fca2SJan Medala 					 sizeof(u16) * txq->ring_size,
12971173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
12981173fca2SJan Medala 	if (!txq->empty_tx_reqs) {
12996f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n");
1300df238f84SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
1301df238f84SMichal Krawczyk 		return -ENOMEM;
13021173fca2SJan Medala 	}
1303241da076SRafal Kozik 
13042fca2a98SMichal Krawczyk 	txq->push_buf_intermediate_buf =
13052fca2a98SMichal Krawczyk 		rte_zmalloc("txq->push_buf_intermediate_buf",
13062fca2a98SMichal Krawczyk 			    txq->tx_max_header_size,
13072fca2a98SMichal Krawczyk 			    RTE_CACHE_LINE_SIZE);
13082fca2a98SMichal Krawczyk 	if (!txq->push_buf_intermediate_buf) {
13096f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n");
13102fca2a98SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
13112fca2a98SMichal Krawczyk 		rte_free(txq->empty_tx_reqs);
13122fca2a98SMichal Krawczyk 		return -ENOMEM;
13132fca2a98SMichal Krawczyk 	}
13142fca2a98SMichal Krawczyk 
13151173fca2SJan Medala 	for (i = 0; i < txq->ring_size; i++)
13161173fca2SJan Medala 		txq->empty_tx_reqs[i] = i;
13171173fca2SJan Medala 
13182081d5e2SMichal Krawczyk 	if (tx_conf != NULL) {
13192081d5e2SMichal Krawczyk 		txq->offloads =
13202081d5e2SMichal Krawczyk 			tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
13212081d5e2SMichal Krawczyk 	}
13221173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
13231173fca2SJan Medala 	txq->configured = 1;
13241173fca2SJan Medala 	dev->data->tx_queues[queue_idx] = txq;
1325241da076SRafal Kozik 
1326241da076SRafal Kozik 	return 0;
13271173fca2SJan Medala }
13281173fca2SJan Medala 
13291173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev,
13301173fca2SJan Medala 			      uint16_t queue_idx,
13311173fca2SJan Medala 			      uint16_t nb_desc,
13324217cb0bSMichal Krawczyk 			      unsigned int socket_id,
1333a4996bd8SWei Dai 			      __rte_unused const struct rte_eth_rxconf *rx_conf,
13341173fca2SJan Medala 			      struct rte_mempool *mp)
13351173fca2SJan Medala {
1336890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
13371173fca2SJan Medala 	struct ena_ring *rxq = NULL;
133838364c26SMichal Krawczyk 	size_t buffer_size;
1339df238f84SMichal Krawczyk 	int i;
13401173fca2SJan Medala 
13411173fca2SJan Medala 	rxq = &adapter->rx_ring[queue_idx];
13421173fca2SJan Medala 	if (rxq->configured) {
13436f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
13441173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
13451173fca2SJan Medala 			queue_idx);
1346241da076SRafal Kozik 		return ENA_COM_FAULT;
13471173fca2SJan Medala 	}
13481173fca2SJan Medala 
1349ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
13505920d930SMichal Krawczyk 		nb_desc = adapter->max_rx_ring_size;
1351ea93d37eSRafal Kozik 
13521daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
13536f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1354498c687aSRafal Kozik 			"Unsupported size of RX queue: %d is not a power of 2.\n",
13551daff526SJakub Palider 			nb_desc);
13561daff526SJakub Palider 		return -EINVAL;
13571daff526SJakub Palider 	}
13581daff526SJakub Palider 
13595920d930SMichal Krawczyk 	if (nb_desc > adapter->max_rx_ring_size) {
13606f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
13611173fca2SJan Medala 			"Unsupported size of RX queue (max size: %d)\n",
13625920d930SMichal Krawczyk 			adapter->max_rx_ring_size);
13631173fca2SJan Medala 		return -EINVAL;
13641173fca2SJan Medala 	}
13651173fca2SJan Medala 
136738364c26SMichal Krawczyk 	/* ENA doesn't support buffers smaller than 1400 bytes */
136738364c26SMichal Krawczyk 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
136838364c26SMichal Krawczyk 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
136938364c26SMichal Krawczyk 		PMD_DRV_LOG(ERR,
137038364c26SMichal Krawczyk 			"Unsupported size of RX buffer: %zu (min size: %d)\n",
137138364c26SMichal Krawczyk 			buffer_size, ENA_RX_BUF_MIN_SIZE);
137238364c26SMichal Krawczyk 		return -EINVAL;
137338364c26SMichal Krawczyk 	}
137438364c26SMichal Krawczyk 
13751173fca2SJan Medala 	rxq->port_id = dev->data->port_id;
13761173fca2SJan Medala 	rxq->next_to_clean = 0;
13771173fca2SJan Medala 	rxq->next_to_use = 0;
13781173fca2SJan Medala 	rxq->ring_size = nb_desc;
1379c0006061SMichal Krawczyk 	rxq->size_mask = nb_desc - 1;
13804217cb0bSMichal Krawczyk 	rxq->numa_socket_id = socket_id;
13811173fca2SJan Medala 	rxq->mb_pool = mp;
13821173fca2SJan Medala 
13831173fca2SJan Medala 	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
13841be097dcSMichal Krawczyk 		sizeof(struct ena_rx_buffer) * nb_desc,
13851173fca2SJan Medala 		RTE_CACHE_LINE_SIZE);
13861173fca2SJan Medala 	if (!rxq->rx_buffer_info) {
13876f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
13881173fca2SJan Medala 		return -ENOMEM;
13891173fca2SJan Medala 	}
13901173fca2SJan Medala 
139179405ee1SRafal Kozik 	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
139279405ee1SRafal Kozik 					    sizeof(struct rte_mbuf *) * nb_desc,
139379405ee1SRafal Kozik 					    RTE_CACHE_LINE_SIZE);
139479405ee1SRafal Kozik 
139579405ee1SRafal Kozik 	if (!rxq->rx_refill_buffer) {
13966f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n");
139779405ee1SRafal Kozik 		rte_free(rxq->rx_buffer_info);
139879405ee1SRafal Kozik 		rxq->rx_buffer_info = NULL;
139979405ee1SRafal Kozik 		return -ENOMEM;
140079405ee1SRafal Kozik 	}
140179405ee1SRafal Kozik 
1402c2034976SMichal Krawczyk 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
1403c2034976SMichal Krawczyk 					 sizeof(uint16_t) * nb_desc,
1404c2034976SMichal Krawczyk 					 RTE_CACHE_LINE_SIZE);
1405c2034976SMichal Krawczyk 	if (!rxq->empty_rx_reqs) {
14066f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n");
1407c2034976SMichal Krawczyk 		rte_free(rxq->rx_buffer_info);
1408c2034976SMichal Krawczyk 		rxq->rx_buffer_info = NULL;
140979405ee1SRafal Kozik 		rte_free(rxq->rx_refill_buffer);
141079405ee1SRafal Kozik 		rxq->rx_refill_buffer = NULL;
1411c2034976SMichal Krawczyk 		return -ENOMEM;
1412c2034976SMichal Krawczyk 	}
1413c2034976SMichal Krawczyk 
1414c2034976SMichal Krawczyk 	for (i = 0; i < nb_desc; i++)
1415eccbe2ffSRafal Kozik 		rxq->empty_rx_reqs[i] = i;
1416c2034976SMichal Krawczyk 
14171173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
14181173fca2SJan Medala 	rxq->configured = 1;
14191173fca2SJan Medala 	dev->data->rx_queues[queue_idx] = rxq;
14201173fca2SJan Medala 
1421df238f84SMichal Krawczyk 	return 0;
14221173fca2SJan Medala }
14231173fca2SJan Medala 
142483fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
142583fd97b2SMichal Krawczyk 				  struct rte_mbuf *mbuf, uint16_t id)
142683fd97b2SMichal Krawczyk {
142783fd97b2SMichal Krawczyk 	struct ena_com_buf ebuf;
142883fd97b2SMichal Krawczyk 	int rc;
142983fd97b2SMichal Krawczyk 
143083fd97b2SMichal Krawczyk 	/* prepare physical address for DMA transaction */
143183fd97b2SMichal Krawczyk 	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
143283fd97b2SMichal Krawczyk 	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
143383fd97b2SMichal Krawczyk 
143483fd97b2SMichal Krawczyk 	/* pass resource to device */
143583fd97b2SMichal Krawczyk 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
143683fd97b2SMichal Krawczyk 	if (unlikely(rc != 0))
143783fd97b2SMichal Krawczyk 		PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
143883fd97b2SMichal Krawczyk 
143983fd97b2SMichal Krawczyk 	return rc;
144083fd97b2SMichal Krawczyk }
144183fd97b2SMichal Krawczyk 
14421173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
14431173fca2SJan Medala {
14441173fca2SJan Medala 	unsigned int i;
14451173fca2SJan Medala 	int rc;
14461daff526SJakub Palider 	uint16_t next_to_use = rxq->next_to_use;
1447c2034976SMichal Krawczyk 	uint16_t in_use, req_id;
144879405ee1SRafal Kozik 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
14491173fca2SJan Medala 
14501173fca2SJan Medala 	if (unlikely(!count))
14511173fca2SJan Medala 		return 0;
14521173fca2SJan Medala 
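	/* Descriptors currently owned by the device; refilling more than
	 * ring_size - 1 - in_use entries would overflow the ring.
	 */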
1453c0006061SMichal Krawczyk 	in_use = rxq->ring_size - 1 -
1454c0006061SMichal Krawczyk 		ena_com_free_q_entries(rxq->ena_com_io_sq);
1455c0006061SMichal Krawczyk 	ena_assert_msg(((in_use + count) < rxq->ring_size),
1456c0006061SMichal Krawczyk 		"bad ring state\n");
14571173fca2SJan Medala 
14581173fca2SJan Medala 	/* get resources for incoming packets */
145979405ee1SRafal Kozik 	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
14601173fca2SJan Medala 	if (unlikely(rc < 0)) {
14611173fca2SJan Medala 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
14627830e905SSolganik Alexander 		++rxq->rx_stats.mbuf_alloc_fail;
14631173fca2SJan Medala 		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
14641173fca2SJan Medala 		return 0;
14651173fca2SJan Medala 	}
14661173fca2SJan Medala 
14671173fca2SJan Medala 	for (i = 0; i < count; i++) {
146879405ee1SRafal Kozik 		struct rte_mbuf *mbuf = mbufs[i];
14691be097dcSMichal Krawczyk 		struct ena_rx_buffer *rx_info;
14701173fca2SJan Medala 
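		/* Prefetch the mbuf that will be processed a few iterations
		 * ahead.
		 */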
147179405ee1SRafal Kozik 		if (likely((i + 4) < count))
147279405ee1SRafal Kozik 			rte_prefetch0(mbufs[i + 4]);
1473c2034976SMichal Krawczyk 
1474c0006061SMichal Krawczyk 		req_id = rxq->empty_rx_reqs[next_to_use];
14751be097dcSMichal Krawczyk 		rx_info = &rxq->rx_buffer_info[req_id];
1476241da076SRafal Kozik 
147783fd97b2SMichal Krawczyk 		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
147883fd97b2SMichal Krawczyk 		if (unlikely(rc != 0))
14791173fca2SJan Medala 			break;
148083fd97b2SMichal Krawczyk 
14811be097dcSMichal Krawczyk 		rx_info->mbuf = mbuf;
1482c0006061SMichal Krawczyk 		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
14831173fca2SJan Medala 	}
14841173fca2SJan Medala 
148579405ee1SRafal Kozik 	if (unlikely(i < count)) {
14866f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d "
1487241da076SRafal Kozik 			"buffers (from %d)\n", rxq->id, i, count);
148879405ee1SRafal Kozik 		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
148979405ee1SRafal Kozik 				     count - i);
14907830e905SSolganik Alexander 		++rxq->rx_stats.refill_partial;
149179405ee1SRafal Kozik 	}
1492241da076SRafal Kozik 
14935e02e19eSJan Medala 	/* When we submitted free resources to the device... */
14943d19e1abSRafal Kozik 	if (likely(i > 0)) {
149538faa87eSMichal Krawczyk 		/* ...let HW know that it can fill buffers with data. */
14961173fca2SJan Medala 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
14971173fca2SJan Medala 
14985e02e19eSJan Medala 		rxq->next_to_use = next_to_use;
14995e02e19eSJan Medala 	}
15005e02e19eSJan Medala 
15011173fca2SJan Medala 	return i;
15021173fca2SJan Medala }
15031173fca2SJan Medala 
15041173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev,
1505e859d2b8SRafal Kozik 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
1506e859d2b8SRafal Kozik 			   bool *wd_state)
15071173fca2SJan Medala {
1508ca148440SMichal Krawczyk 	uint32_t aenq_groups;
15091173fca2SJan Medala 	int rc;
1510c4144557SJan Medala 	bool readless_supported;
15111173fca2SJan Medala 
15121173fca2SJan Medala 	/* Initialize mmio registers */
15131173fca2SJan Medala 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
15141173fca2SJan Medala 	if (rc) {
15156f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to init mmio read less\n");
15161173fca2SJan Medala 		return rc;
15171173fca2SJan Medala 	}
15181173fca2SJan Medala 
1519c4144557SJan Medala 	/* The PCIe configuration space revision id indicates whether mmio
1520c4144557SJan Medala 	 * register read is disabled.
1521c4144557SJan Medala 	 */
1522c4144557SJan Medala 	readless_supported =
1523c4144557SJan Medala 		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
1524c4144557SJan Medala 			       & ENA_MMIO_DISABLE_REG_READ);
1525c4144557SJan Medala 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1526c4144557SJan Medala 
15271173fca2SJan Medala 	/* reset device */
15283adcba9aSMichal Krawczyk 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
15291173fca2SJan Medala 	if (rc) {
15306f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "cannot reset device\n");
15311173fca2SJan Medala 		goto err_mmio_read_less;
15321173fca2SJan Medala 	}
15331173fca2SJan Medala 
15341173fca2SJan Medala 	/* check FW version */
15351173fca2SJan Medala 	rc = ena_com_validate_version(ena_dev);
15361173fca2SJan Medala 	if (rc) {
15376f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "device version is too low\n");
15381173fca2SJan Medala 		goto err_mmio_read_less;
15391173fca2SJan Medala 	}
15401173fca2SJan Medala 
15411173fca2SJan Medala 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
15421173fca2SJan Medala 
15431173fca2SJan Medala 	/* ENA device administration layer init */
1544b68309beSRafal Kozik 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
15451173fca2SJan Medala 	if (rc) {
15466f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15471173fca2SJan Medala 			"cannot initialize ena admin queue with device\n");
15481173fca2SJan Medala 		goto err_mmio_read_less;
15491173fca2SJan Medala 	}
15501173fca2SJan Medala 
15511173fca2SJan Medala 	/* To enable the msix interrupts the driver needs to know the number
15521173fca2SJan Medala 	 * of queues. So the driver uses polling mode to retrieve this
15531173fca2SJan Medala 	 * information.
15541173fca2SJan Medala 	 */
15551173fca2SJan Medala 	ena_com_set_admin_polling_mode(ena_dev, true);
15561173fca2SJan Medala 
1557201ff2e5SJakub Palider 	ena_config_host_info(ena_dev);
1558201ff2e5SJakub Palider 
15591173fca2SJan Medala 	/* Get Device Attributes and features */
15601173fca2SJan Medala 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
15611173fca2SJan Medala 	if (rc) {
15626f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15631173fca2SJan Medala 			"cannot get attributes for ena device, rc: %d\n", rc);
15641173fca2SJan Medala 		goto err_admin_init;
15651173fca2SJan Medala 	}
15661173fca2SJan Medala 
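	/* Subscribe only to those AENQ events which are both requested by the
	 * driver and supported by the device.
	 */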
1567f01f060cSRafal Kozik 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1568d9b8b106SMichal Krawczyk 		      BIT(ENA_ADMIN_NOTIFICATION) |
1569983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1570983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1571983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_WARNING);
1572ca148440SMichal Krawczyk 
1573ca148440SMichal Krawczyk 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1574ca148440SMichal Krawczyk 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
1575ca148440SMichal Krawczyk 	if (rc) {
15766f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc);
1577ca148440SMichal Krawczyk 		goto err_admin_init;
1578ca148440SMichal Krawczyk 	}
1579ca148440SMichal Krawczyk 
1580e859d2b8SRafal Kozik 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
1581e859d2b8SRafal Kozik 
15821173fca2SJan Medala 	return 0;
15831173fca2SJan Medala 
15841173fca2SJan Medala err_admin_init:
15851173fca2SJan Medala 	ena_com_admin_destroy(ena_dev);
15861173fca2SJan Medala 
15871173fca2SJan Medala err_mmio_read_less:
15881173fca2SJan Medala 	ena_com_mmio_reg_read_request_destroy(ena_dev);
15891173fca2SJan Medala 
15901173fca2SJan Medala 	return rc;
15911173fca2SJan Medala }
15921173fca2SJan Medala 
1593ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg)
159415773e06SMichal Krawczyk {
1595890728ffSStephen Hemminger 	struct ena_adapter *adapter = cb_arg;
159615773e06SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
159715773e06SMichal Krawczyk 
159815773e06SMichal Krawczyk 	ena_com_admin_q_comp_intr_handler(ena_dev);
15993d19e1abSRafal Kozik 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1600ca148440SMichal Krawczyk 		ena_com_aenq_intr_handler(ena_dev, adapter);
160115773e06SMichal Krawczyk }
160215773e06SMichal Krawczyk 
16035efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter)
16045efb9fc7SMichal Krawczyk {
1605e859d2b8SRafal Kozik 	if (!adapter->wd_state)
1606e859d2b8SRafal Kozik 		return;
1607e859d2b8SRafal Kozik 
16085efb9fc7SMichal Krawczyk 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
16095efb9fc7SMichal Krawczyk 		return;
16105efb9fc7SMichal Krawczyk 
16115efb9fc7SMichal Krawczyk 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
16125efb9fc7SMichal Krawczyk 	    adapter->keep_alive_timeout)) {
16136f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
16145efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
16155efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16167830e905SSolganik Alexander 		++adapter->dev_stats.wd_expired;
16175efb9fc7SMichal Krawczyk 	}
16185efb9fc7SMichal Krawczyk }
16195efb9fc7SMichal Krawczyk 
16205efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */
16215efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter)
16225efb9fc7SMichal Krawczyk {
16235efb9fc7SMichal Krawczyk 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
16246f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n");
16255efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
16265efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16275efb9fc7SMichal Krawczyk 	}
16285efb9fc7SMichal Krawczyk }
16295efb9fc7SMichal Krawczyk 
1630d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1631d9b8b106SMichal Krawczyk 				  void *arg)
1632d9b8b106SMichal Krawczyk {
1633890728ffSStephen Hemminger 	struct ena_adapter *adapter = arg;
1634d9b8b106SMichal Krawczyk 	struct rte_eth_dev *dev = adapter->rte_dev;
1635d9b8b106SMichal Krawczyk 
16365efb9fc7SMichal Krawczyk 	check_for_missing_keep_alive(adapter);
16375efb9fc7SMichal Krawczyk 	check_for_admin_com_state(adapter);
1638d9b8b106SMichal Krawczyk 
16395efb9fc7SMichal Krawczyk 	if (unlikely(adapter->trigger_reset)) {
16406f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
16415723fbedSFerruh Yigit 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1642d9b8b106SMichal Krawczyk 			NULL);
1643d9b8b106SMichal Krawczyk 	}
1644d9b8b106SMichal Krawczyk }
1645d9b8b106SMichal Krawczyk 
16462fca2a98SMichal Krawczyk static inline void
16478a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config,
16488a7a73f2SMichal Krawczyk 			       struct ena_admin_feature_llq_desc *llq,
16498a7a73f2SMichal Krawczyk 			       bool use_large_llq_hdr)
16502fca2a98SMichal Krawczyk {
16512fca2a98SMichal Krawczyk 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
16522fca2a98SMichal Krawczyk 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
16532fca2a98SMichal Krawczyk 	llq_config->llq_num_decs_before_header =
16542fca2a98SMichal Krawczyk 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
16558a7a73f2SMichal Krawczyk 
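	/* Use the 256B LLQ entry when it is requested and supported by the
	 * device, so that larger Tx headers can be pushed; otherwise keep the
	 * default 128B entry.
	 */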
16568a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr &&
16578a7a73f2SMichal Krawczyk 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
16588a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16598a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
16608a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 256;
16618a7a73f2SMichal Krawczyk 	} else {
16628a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16638a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
16642fca2a98SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 128;
16652fca2a98SMichal Krawczyk 	}
16668a7a73f2SMichal Krawczyk }
16672fca2a98SMichal Krawczyk 
16682fca2a98SMichal Krawczyk static int
16692fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter,
16702fca2a98SMichal Krawczyk 				struct ena_com_dev *ena_dev,
16712fca2a98SMichal Krawczyk 				struct ena_admin_feature_llq_desc *llq,
16722fca2a98SMichal Krawczyk 				struct ena_llq_configurations *llq_default_configurations)
16732fca2a98SMichal Krawczyk {
16742fca2a98SMichal Krawczyk 	int rc;
16752fca2a98SMichal Krawczyk 	u32 llq_feature_mask;
16762fca2a98SMichal Krawczyk 
16772fca2a98SMichal Krawczyk 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
16782fca2a98SMichal Krawczyk 	if (!(ena_dev->supported_features & llq_feature_mask)) {
16796f1c9df9SStephen Hemminger 		PMD_DRV_LOG(INFO,
16802fca2a98SMichal Krawczyk 			"LLQ is not supported. Falling back to host mode policy.\n");
16812fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16822fca2a98SMichal Krawczyk 		return 0;
16832fca2a98SMichal Krawczyk 	}
16842fca2a98SMichal Krawczyk 
16852fca2a98SMichal Krawczyk 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
16862fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
16872fca2a98SMichal Krawczyk 		PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
1688498c687aSRafal Kozik 			"Falling back to host mode policy.");
16892fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16902fca2a98SMichal Krawczyk 		return 0;
16912fca2a98SMichal Krawczyk 	}
16922fca2a98SMichal Krawczyk 
16932fca2a98SMichal Krawczyk 	/* Nothing to config, exit */
16942fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
16952fca2a98SMichal Krawczyk 		return 0;
16962fca2a98SMichal Krawczyk 
16972fca2a98SMichal Krawczyk 	if (!adapter->dev_mem_base) {
16986f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Unable to access LLQ BAR resource. "
16992fca2a98SMichal Krawczyk 			"Falling back to host mode policy.\n");
17002fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
17012fca2a98SMichal Krawczyk 		return 0;
17022fca2a98SMichal Krawczyk 	}
17032fca2a98SMichal Krawczyk 
17042fca2a98SMichal Krawczyk 	ena_dev->mem_bar = adapter->dev_mem_base;
17052fca2a98SMichal Krawczyk 
17062fca2a98SMichal Krawczyk 	return 0;
17072fca2a98SMichal Krawczyk }
17082fca2a98SMichal Krawczyk 
17095920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
171001bd6877SRafal Kozik 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
171101bd6877SRafal Kozik {
17125920d930SMichal Krawczyk 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
171301bd6877SRafal Kozik 
1714ea93d37eSRafal Kozik 	/* Regular queues capabilities */
1715ea93d37eSRafal Kozik 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1716ea93d37eSRafal Kozik 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1717ea93d37eSRafal Kozik 			&get_feat_ctx->max_queue_ext.max_queue_ext;
17182fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
17192fca2a98SMichal Krawczyk 				    max_queue_ext->max_rx_cq_num);
17202fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
17212fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
1722ea93d37eSRafal Kozik 	} else {
1723ea93d37eSRafal Kozik 		struct ena_admin_queue_feature_desc *max_queues =
1724ea93d37eSRafal Kozik 			&get_feat_ctx->max_queues;
17252fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queues->max_sq_num;
17262fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queues->max_cq_num;
17272fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
1728ea93d37eSRafal Kozik 	}
172901bd6877SRafal Kozik 
17302fca2a98SMichal Krawczyk 	/* In case of LLQ use the llq number in the get feature cmd */
17312fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
17322fca2a98SMichal Krawczyk 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
17332fca2a98SMichal Krawczyk 
17345920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
17355920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
17365920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
173701bd6877SRafal Kozik 
17385920d930SMichal Krawczyk 	if (unlikely(max_num_io_queues == 0)) {
17396f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
174101bd6877SRafal Kozik 		/* The function returns uint32_t, so return 0 here; the caller
174101bd6877SRafal Kozik 		 * treats 0 as an error.
174101bd6877SRafal Kozik 		 */
174101bd6877SRafal Kozik 		return 0;
174101bd6877SRafal Kozik 	}
174201bd6877SRafal Kozik 
17435920d930SMichal Krawczyk 	return max_num_io_queues;
174401bd6877SRafal Kozik }
174501bd6877SRafal Kozik 
17461173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
17471173fca2SJan Medala {
1748ea93d37eSRafal Kozik 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
17491173fca2SJan Medala 	struct rte_pci_device *pci_dev;
1750eb0ef49dSMichal Krawczyk 	struct rte_intr_handle *intr_handle;
1751890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
17521173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
17531173fca2SJan Medala 	struct ena_com_dev_get_features_ctx get_feat_ctx;
17542fca2a98SMichal Krawczyk 	struct ena_llq_configurations llq_config;
17552fca2a98SMichal Krawczyk 	const char *queue_type_str;
17565920d930SMichal Krawczyk 	uint32_t max_num_io_queues;
1757ea93d37eSRafal Kozik 	int rc;
17581173fca2SJan Medala 	static int adapters_found;
175933dde075SMichal Krawczyk 	bool disable_meta_caching;
17605f267cb0SFerruh Yigit 	bool wd_state = false;
17611173fca2SJan Medala 
17621173fca2SJan Medala 	eth_dev->dev_ops = &ena_dev_ops;
17631173fca2SJan Medala 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
17641173fca2SJan Medala 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
1765b3fc5a1aSKonstantin Ananyev 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
17661173fca2SJan Medala 
17671173fca2SJan Medala 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
17681173fca2SJan Medala 		return 0;
17691173fca2SJan Medala 
1770f30e69b4SFerruh Yigit 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1771f30e69b4SFerruh Yigit 
1772fd976890SMichal Krawczyk 	memset(adapter, 0, sizeof(struct ena_adapter));
1773fd976890SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1774fd976890SMichal Krawczyk 
1775fd976890SMichal Krawczyk 	adapter->rte_eth_dev_data = eth_dev->data;
1776fd976890SMichal Krawczyk 	adapter->rte_dev = eth_dev;
1777fd976890SMichal Krawczyk 
1778c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
17791173fca2SJan Medala 	adapter->pdev = pci_dev;
17801173fca2SJan Medala 
1781f2462150SFerruh Yigit 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
17821173fca2SJan Medala 		     pci_dev->addr.domain,
17831173fca2SJan Medala 		     pci_dev->addr.bus,
17841173fca2SJan Medala 		     pci_dev->addr.devid,
17851173fca2SJan Medala 		     pci_dev->addr.function);
17861173fca2SJan Medala 
1787eb0ef49dSMichal Krawczyk 	intr_handle = &pci_dev->intr_handle;
1788eb0ef49dSMichal Krawczyk 
17891173fca2SJan Medala 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
17901173fca2SJan Medala 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
17911173fca2SJan Medala 
17921d339597SRafal Kozik 	if (!adapter->regs) {
1793f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
17941173fca2SJan Medala 			     ENA_REGS_BAR);
17951d339597SRafal Kozik 		return -ENXIO;
17961d339597SRafal Kozik 	}
17971173fca2SJan Medala 
17981173fca2SJan Medala 	ena_dev->reg_bar = adapter->regs;
17991173fca2SJan Medala 	ena_dev->dmadev = adapter->pdev;
18001173fca2SJan Medala 
18011173fca2SJan Medala 	adapter->id_number = adapters_found;
18021173fca2SJan Medala 
18031173fca2SJan Medala 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
18041173fca2SJan Medala 		 adapter->id_number);
18051173fca2SJan Medala 
18068a7a73f2SMichal Krawczyk 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
18078a7a73f2SMichal Krawczyk 	if (rc != 0) {
18088a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
18098a7a73f2SMichal Krawczyk 		goto err;
18108a7a73f2SMichal Krawczyk 	}
18118a7a73f2SMichal Krawczyk 
18121173fca2SJan Medala 	/* device specific initialization routine */
1813e859d2b8SRafal Kozik 	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
18141173fca2SJan Medala 	if (rc) {
1815f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
1816241da076SRafal Kozik 		goto err;
18171173fca2SJan Medala 	}
1818e859d2b8SRafal Kozik 	adapter->wd_state = wd_state;
18191173fca2SJan Medala 
18208a7a73f2SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
18218a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18222fca2a98SMichal Krawczyk 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
18232fca2a98SMichal Krawczyk 					     &get_feat_ctx.llq, &llq_config);
18242fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
18252fca2a98SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to set placement policy");
18262fca2a98SMichal Krawczyk 		goto err_device_destroy;
18272fca2a98SMichal Krawczyk 	}
18282fca2a98SMichal Krawczyk 
18292fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
18302fca2a98SMichal Krawczyk 		queue_type_str = "Regular";
18312fca2a98SMichal Krawczyk 	else
18322fca2a98SMichal Krawczyk 		queue_type_str = "Low latency";
18336f1c9df9SStephen Hemminger 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
1834ea93d37eSRafal Kozik 
1835ea93d37eSRafal Kozik 	calc_queue_ctx.ena_dev = ena_dev;
1836ea93d37eSRafal Kozik 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
18371173fca2SJan Medala 
18385920d930SMichal Krawczyk 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
18398a7a73f2SMichal Krawczyk 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
18408a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18415920d930SMichal Krawczyk 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
1842241da076SRafal Kozik 		rc = -EFAULT;
1843241da076SRafal Kozik 		goto err_device_destroy;
1844241da076SRafal Kozik 	}
18451173fca2SJan Medala 
18465920d930SMichal Krawczyk 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
18475920d930SMichal Krawczyk 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
1848ea93d37eSRafal Kozik 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
1849ea93d37eSRafal Kozik 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
18505920d930SMichal Krawczyk 	adapter->max_num_io_queues = max_num_io_queues;
18512061fe41SRafal Kozik 
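	/* Meta descriptor caching can be disabled only when LLQ is used and
	 * the device reports support for it.
	 */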
185233dde075SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
185333dde075SMichal Krawczyk 		disable_meta_caching =
185433dde075SMichal Krawczyk 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
185533dde075SMichal Krawczyk 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
185633dde075SMichal Krawczyk 	} else {
185733dde075SMichal Krawczyk 		disable_meta_caching = false;
185833dde075SMichal Krawczyk 	}
185933dde075SMichal Krawczyk 
18601173fca2SJan Medala 	/* prepare ring structures */
186133dde075SMichal Krawczyk 	ena_init_rings(adapter, disable_meta_caching);
18621173fca2SJan Medala 
1863372c1af5SJan Medala 	ena_config_debug_area(adapter);
1864372c1af5SJan Medala 
18651173fca2SJan Medala 	/* Set max MTU for this device */
18661173fca2SJan Medala 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
18671173fca2SJan Medala 
1868117ba4a6SMichal Krawczyk 	/* set device support for offloads */
1869117ba4a6SMichal Krawczyk 	adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
1870117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
1871117ba4a6SMichal Krawczyk 	adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
1872117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
1873ef538c1aSMichal Krawczyk 	adapter->offloads.rx_csum_supported =
1874117ba4a6SMichal Krawczyk 		(get_feat_ctx.offload.rx_supported &
1875117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
187683277a7cSJakub Palider 
18771173fca2SJan Medala 	/* Copy MAC address and point DPDK to it */
18786d13ea8eSOlivier Matz 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
1879538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)
1880538da7a1SOlivier Matz 			get_feat_ctx.dev_attr.mac_addr,
18816d13ea8eSOlivier Matz 			(struct rte_ether_addr *)adapter->mac_addr);
18821173fca2SJan Medala 
18831173fca2SJan Medala 	adapter->drv_stats = rte_zmalloc("adapter stats",
18841173fca2SJan Medala 					 sizeof(*adapter->drv_stats),
18851173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
18861173fca2SJan Medala 	if (!adapter->drv_stats) {
18876f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n");
1888241da076SRafal Kozik 		rc = -ENOMEM;
1889241da076SRafal Kozik 		goto err_delete_debug_area;
18901173fca2SJan Medala 	}
18911173fca2SJan Medala 
18921343c415SMichal Krawczyk 	rte_spinlock_init(&adapter->admin_lock);
18931343c415SMichal Krawczyk 
1894eb0ef49dSMichal Krawczyk 	rte_intr_callback_register(intr_handle,
1895eb0ef49dSMichal Krawczyk 				   ena_interrupt_handler_rte,
1896eb0ef49dSMichal Krawczyk 				   adapter);
1897eb0ef49dSMichal Krawczyk 	rte_intr_enable(intr_handle);
1898eb0ef49dSMichal Krawczyk 	ena_com_set_admin_polling_mode(ena_dev, false);
1899ca148440SMichal Krawczyk 	ena_com_admin_aenq_enable(ena_dev);
1900eb0ef49dSMichal Krawczyk 
1901d9b8b106SMichal Krawczyk 	if (adapters_found == 0)
1902d9b8b106SMichal Krawczyk 		rte_timer_subsystem_init();
1903d9b8b106SMichal Krawczyk 	rte_timer_init(&adapter->timer_wd);
1904d9b8b106SMichal Krawczyk 
19051173fca2SJan Medala 	adapters_found++;
19061173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_INIT;
19071173fca2SJan Medala 
19081173fca2SJan Medala 	return 0;
1909241da076SRafal Kozik 
1910241da076SRafal Kozik err_delete_debug_area:
1911241da076SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1912241da076SRafal Kozik 
1913241da076SRafal Kozik err_device_destroy:
1914241da076SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1915241da076SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1916241da076SRafal Kozik 
1917241da076SRafal Kozik err:
1918241da076SRafal Kozik 	return rc;
19191173fca2SJan Medala }
19201173fca2SJan Medala 
1921e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev)
1922eb0ef49dSMichal Krawczyk {
1923890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
1924e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1925eb0ef49dSMichal Krawczyk 
1926e457bc70SRafal Kozik 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
1927e457bc70SRafal Kozik 		return;
1928e457bc70SRafal Kozik 
1929e457bc70SRafal Kozik 	ena_com_set_admin_running_state(ena_dev, false);
1930eb0ef49dSMichal Krawczyk 
1931eb0ef49dSMichal Krawczyk 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
1932eb0ef49dSMichal Krawczyk 		ena_close(eth_dev);
1933eb0ef49dSMichal Krawczyk 
1934e457bc70SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1935e457bc70SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1936e457bc70SRafal Kozik 
1937e457bc70SRafal Kozik 	ena_com_abort_admin_commands(ena_dev);
1938e457bc70SRafal Kozik 	ena_com_wait_for_abort_completion(ena_dev);
1939e457bc70SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1940e457bc70SRafal Kozik 	ena_com_mmio_reg_read_request_destroy(ena_dev);
1941e457bc70SRafal Kozik 
1942e457bc70SRafal Kozik 	adapter->state = ENA_ADAPTER_STATE_FREE;
1943e457bc70SRafal Kozik }
1944e457bc70SRafal Kozik 
1945e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
1946e457bc70SRafal Kozik {
1947e457bc70SRafal Kozik 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1948e457bc70SRafal Kozik 		return 0;
1949e457bc70SRafal Kozik 
1950e457bc70SRafal Kozik 	ena_destroy_device(eth_dev);
1951e457bc70SRafal Kozik 
1952eb0ef49dSMichal Krawczyk 	return 0;
1953eb0ef49dSMichal Krawczyk }
1954eb0ef49dSMichal Krawczyk 
19551173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
19561173fca2SJan Medala {
1957890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
19587369f88fSRafal Kozik 
19591173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
19601173fca2SJan Medala 
1961a4996bd8SWei Dai 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
1962a4996bd8SWei Dai 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
19631173fca2SJan Medala 	return 0;
19641173fca2SJan Medala }
19651173fca2SJan Medala 
196633dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
196733dde075SMichal Krawczyk 			   bool disable_meta_caching)
19681173fca2SJan Medala {
19695920d930SMichal Krawczyk 	size_t i;
19701173fca2SJan Medala 
19715920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19721173fca2SJan Medala 		struct ena_ring *ring = &adapter->tx_ring[i];
19731173fca2SJan Medala 
19741173fca2SJan Medala 		ring->configured = 0;
19751173fca2SJan Medala 		ring->type = ENA_RING_TYPE_TX;
19761173fca2SJan Medala 		ring->adapter = adapter;
19771173fca2SJan Medala 		ring->id = i;
19781173fca2SJan Medala 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
19791173fca2SJan Medala 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
19802061fe41SRafal Kozik 		ring->sgl_size = adapter->max_tx_sgl_size;
198133dde075SMichal Krawczyk 		ring->disable_meta_caching = disable_meta_caching;
19821173fca2SJan Medala 	}
19831173fca2SJan Medala 
19845920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19851173fca2SJan Medala 		struct ena_ring *ring = &adapter->rx_ring[i];
19861173fca2SJan Medala 
19871173fca2SJan Medala 		ring->configured = 0;
19881173fca2SJan Medala 		ring->type = ENA_RING_TYPE_RX;
19891173fca2SJan Medala 		ring->adapter = adapter;
19901173fca2SJan Medala 		ring->id = i;
1991ea93d37eSRafal Kozik 		ring->sgl_size = adapter->max_rx_sgl_size;
19921173fca2SJan Medala 	}
19931173fca2SJan Medala }
19941173fca2SJan Medala 
1995bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
19961173fca2SJan Medala 			  struct rte_eth_dev_info *dev_info)
19971173fca2SJan Medala {
19981173fca2SJan Medala 	struct ena_adapter *adapter;
19991173fca2SJan Medala 	struct ena_com_dev *ena_dev;
200056b8b9b7SRafal Kozik 	uint64_t rx_feat = 0, tx_feat = 0;
20011173fca2SJan Medala 
2002498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2003498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2004890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
20051173fca2SJan Medala 
20061173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
2007498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
20081173fca2SJan Medala 
2009e274f573SMarc Sune 	dev_info->speed_capa =
2010e274f573SMarc Sune 			ETH_LINK_SPEED_1G   |
2011e274f573SMarc Sune 			ETH_LINK_SPEED_2_5G |
2012e274f573SMarc Sune 			ETH_LINK_SPEED_5G   |
2013e274f573SMarc Sune 			ETH_LINK_SPEED_10G  |
2014e274f573SMarc Sune 			ETH_LINK_SPEED_25G  |
2015e274f573SMarc Sune 			ETH_LINK_SPEED_40G  |
2016b2feed01SThomas Monjalon 			ETH_LINK_SPEED_50G  |
2017b2feed01SThomas Monjalon 			ETH_LINK_SPEED_100G;
2018e274f573SMarc Sune 
20191173fca2SJan Medala 	/* Set Tx & Rx features available for device */
2020117ba4a6SMichal Krawczyk 	if (adapter->offloads.tso4_supported)
20211173fca2SJan Medala 		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
20221173fca2SJan Medala 
2023117ba4a6SMichal Krawczyk 	if (adapter->offloads.tx_csum_supported)
20241173fca2SJan Medala 		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
20251173fca2SJan Medala 			DEV_TX_OFFLOAD_UDP_CKSUM |
20261173fca2SJan Medala 			DEV_TX_OFFLOAD_TCP_CKSUM;
20271173fca2SJan Medala 
2028117ba4a6SMichal Krawczyk 	if (adapter->offloads.rx_csum_supported)
20291173fca2SJan Medala 		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
20301173fca2SJan Medala 			DEV_RX_OFFLOAD_UDP_CKSUM  |
20311173fca2SJan Medala 			DEV_RX_OFFLOAD_TCP_CKSUM;
20321173fca2SJan Medala 
2033a0a4ff40SRafal Kozik 	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2034a0a4ff40SRafal Kozik 
20351173fca2SJan Medala 	/* Inform framework about available features */
20361173fca2SJan Medala 	dev_info->rx_offload_capa = rx_feat;
20377369f88fSRafal Kozik 	dev_info->rx_queue_offload_capa = rx_feat;
20381173fca2SJan Medala 	dev_info->tx_offload_capa = tx_feat;
203956b8b9b7SRafal Kozik 	dev_info->tx_queue_offload_capa = tx_feat;
20401173fca2SJan Medala 
2041b01ead20SRafal Kozik 	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
2042b01ead20SRafal Kozik 					   ETH_RSS_UDP;
2043b01ead20SRafal Kozik 
20441173fca2SJan Medala 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
20451173fca2SJan Medala 	dev_info->max_rx_pktlen  = adapter->max_mtu;
20461173fca2SJan Medala 	dev_info->max_mac_addrs = 1;
20471173fca2SJan Medala 
20485920d930SMichal Krawczyk 	dev_info->max_rx_queues = adapter->max_num_io_queues;
20495920d930SMichal Krawczyk 	dev_info->max_tx_queues = adapter->max_num_io_queues;
20501173fca2SJan Medala 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
205156b8b9b7SRafal Kozik 
205256b8b9b7SRafal Kozik 	adapter->tx_supported_offloads = tx_feat;
20537369f88fSRafal Kozik 	adapter->rx_supported_offloads = rx_feat;
205492680dc2SRafal Kozik 
20555920d930SMichal Krawczyk 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
205692680dc2SRafal Kozik 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2057ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2058ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
2059ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2060ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
206192680dc2SRafal Kozik 
20625920d930SMichal Krawczyk 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
206392680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
206492680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2065ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
206692680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2067ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
2068bdad90d1SIvan Ilchenko 
2069bdad90d1SIvan Ilchenko 	return 0;
20701173fca2SJan Medala }
20711173fca2SJan Medala 
20721be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
20731be097dcSMichal Krawczyk {
20741be097dcSMichal Krawczyk 	mbuf->data_len = len;
20751be097dcSMichal Krawczyk 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
20761be097dcSMichal Krawczyk 	mbuf->refcnt = 1;
20771be097dcSMichal Krawczyk 	mbuf->next = NULL;
20781be097dcSMichal Krawczyk }
20791be097dcSMichal Krawczyk 
20801be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
20811be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
20821be097dcSMichal Krawczyk 				    uint32_t descs,
20831be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
20841be097dcSMichal Krawczyk 				    uint8_t offset)
20851be097dcSMichal Krawczyk {
20861be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
20871be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf_head;
20881be097dcSMichal Krawczyk 	struct ena_rx_buffer *rx_info;
208983fd97b2SMichal Krawczyk 	int rc;
20901be097dcSMichal Krawczyk 	uint16_t ntc, len, req_id, buf = 0;
20911be097dcSMichal Krawczyk 
20921be097dcSMichal Krawczyk 	if (unlikely(descs == 0))
20931be097dcSMichal Krawczyk 		return NULL;
20941be097dcSMichal Krawczyk 
20951be097dcSMichal Krawczyk 	ntc = *next_to_clean;
20961be097dcSMichal Krawczyk 
20971be097dcSMichal Krawczyk 	len = ena_bufs[buf].len;
20981be097dcSMichal Krawczyk 	req_id = ena_bufs[buf].req_id;
20991be097dcSMichal Krawczyk 
21001be097dcSMichal Krawczyk 	rx_info = &rx_ring->rx_buffer_info[req_id];
21011be097dcSMichal Krawczyk 
21021be097dcSMichal Krawczyk 	mbuf = rx_info->mbuf;
21031be097dcSMichal Krawczyk 	RTE_ASSERT(mbuf != NULL);
21041be097dcSMichal Krawczyk 
21051be097dcSMichal Krawczyk 	ena_init_rx_mbuf(mbuf, len);
21061be097dcSMichal Krawczyk 
21071be097dcSMichal Krawczyk 	/* Fill the mbuf head with the data specific for 1st segment. */
21081be097dcSMichal Krawczyk 	mbuf_head = mbuf;
21091be097dcSMichal Krawczyk 	mbuf_head->nb_segs = descs;
21101be097dcSMichal Krawczyk 	mbuf_head->port = rx_ring->port_id;
21111be097dcSMichal Krawczyk 	mbuf_head->pkt_len = len;
21121be097dcSMichal Krawczyk 	mbuf_head->data_off += offset;
21131be097dcSMichal Krawczyk 
21141be097dcSMichal Krawczyk 	rx_info->mbuf = NULL;
2115c0006061SMichal Krawczyk 	rx_ring->empty_rx_reqs[ntc] = req_id;
2116c0006061SMichal Krawczyk 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
21171be097dcSMichal Krawczyk 
21181be097dcSMichal Krawczyk 	while (--descs) {
21191be097dcSMichal Krawczyk 		++buf;
21201be097dcSMichal Krawczyk 		len = ena_bufs[buf].len;
21211be097dcSMichal Krawczyk 		req_id = ena_bufs[buf].req_id;
21221be097dcSMichal Krawczyk 
21231be097dcSMichal Krawczyk 		rx_info = &rx_ring->rx_buffer_info[req_id];
21241be097dcSMichal Krawczyk 		RTE_ASSERT(rx_info->mbuf != NULL);
21251be097dcSMichal Krawczyk 
212683fd97b2SMichal Krawczyk 		if (unlikely(len == 0)) {
212783fd97b2SMichal Krawczyk 			/*
212883fd97b2SMichal Krawczyk 			 * Some devices can pass a descriptor with length 0.
212983fd97b2SMichal Krawczyk 			 * The PMD simply puts such a descriptor back, as it
213083fd97b2SMichal Krawczyk 			 * was never used; that also avoids a needless mbuf
213183fd97b2SMichal Krawczyk 			 * allocation.
213283fd97b2SMichal Krawczyk 			 */
213383fd97b2SMichal Krawczyk 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
213483fd97b2SMichal Krawczyk 				rx_info->mbuf, req_id);
213583fd97b2SMichal Krawczyk 			if (unlikely(rc != 0)) {
213683fd97b2SMichal Krawczyk 				/* Free the mbuf in case of an error. */
213783fd97b2SMichal Krawczyk 				rte_mbuf_raw_free(rx_info->mbuf);
213883fd97b2SMichal Krawczyk 			} else {
213983fd97b2SMichal Krawczyk 				/*
214083fd97b2SMichal Krawczyk 				 * If there was no error, just exit the loop as
214183fd97b2SMichal Krawczyk 				 * 0 length descriptor is always the last one.
214283fd97b2SMichal Krawczyk 				 */
214383fd97b2SMichal Krawczyk 				break;
214483fd97b2SMichal Krawczyk 			}
214583fd97b2SMichal Krawczyk 		} else {
21461be097dcSMichal Krawczyk 			/* Create an mbuf chain. */
21471be097dcSMichal Krawczyk 			mbuf->next = rx_info->mbuf;
21481be097dcSMichal Krawczyk 			mbuf = mbuf->next;
21491be097dcSMichal Krawczyk 
21501be097dcSMichal Krawczyk 			ena_init_rx_mbuf(mbuf, len);
21511be097dcSMichal Krawczyk 			mbuf_head->pkt_len += len;
215283fd97b2SMichal Krawczyk 		}
21531be097dcSMichal Krawczyk 
215483fd97b2SMichal Krawczyk 		/*
215583fd97b2SMichal Krawczyk 		 * Mark the descriptor as depleted and perform necessary
215683fd97b2SMichal Krawczyk 		 * cleanup.
215783fd97b2SMichal Krawczyk 		 * This code will execute in two cases:
215883fd97b2SMichal Krawczyk 		 *  1. Descriptor len was greater than 0 - normal situation.
215983fd97b2SMichal Krawczyk 		 *  2. Descriptor len was 0 and we failed to add the descriptor
216083fd97b2SMichal Krawczyk 		 *     to the device. In that situation, we should try to add
216183fd97b2SMichal Krawczyk 		 *     the mbuf again in the populate routine and mark the
216283fd97b2SMichal Krawczyk 		 *     descriptor as used up by the device.
216383fd97b2SMichal Krawczyk 		 */
21641be097dcSMichal Krawczyk 		rx_info->mbuf = NULL;
2165c0006061SMichal Krawczyk 		rx_ring->empty_rx_reqs[ntc] = req_id;
2166c0006061SMichal Krawczyk 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
21671be097dcSMichal Krawczyk 	}
21681be097dcSMichal Krawczyk 
21691be097dcSMichal Krawczyk 	*next_to_clean = ntc;
21701be097dcSMichal Krawczyk 
21711be097dcSMichal Krawczyk 	return mbuf_head;
21721be097dcSMichal Krawczyk }
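/*
 * Illustrative example of the reassembly above (hypothetical sizes): a
 * packet spread over three descriptors of 1500, 1500 and 200 bytes is
 * returned as a 3-segment chain whose head mbuf reports nb_segs == 3 and
 * pkt_len == 3200, while each segment keeps its own data_len.
 */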
21731be097dcSMichal Krawczyk 
21741173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
21751173fca2SJan Medala 				  uint16_t nb_pkts)
21761173fca2SJan Medala {
21771173fca2SJan Medala 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
217877550607SMichal Krawczyk 	unsigned int free_queue_entries;
217977550607SMichal Krawczyk 	unsigned int refill_threshold;
21801173fca2SJan Medala 	uint16_t next_to_clean = rx_ring->next_to_clean;
218174456796SMichal Krawczyk 	uint16_t descs_in_use;
21821be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
21831be097dcSMichal Krawczyk 	uint16_t completed;
21841173fca2SJan Medala 	struct ena_com_rx_ctx ena_rx_ctx;
21851be097dcSMichal Krawczyk 	int i, rc = 0;
21861173fca2SJan Medala 
21871173fca2SJan Medala 	/* Check adapter state */
21881173fca2SJan Medala 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
21896f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
21901173fca2SJan Medala 			"Trying to receive pkts while device is NOT running\n");
21911173fca2SJan Medala 		return 0;
21921173fca2SJan Medala 	}
21931173fca2SJan Medala 
2194c0006061SMichal Krawczyk 	descs_in_use = rx_ring->ring_size -
219574456796SMichal Krawczyk 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
219674456796SMichal Krawczyk 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
21971173fca2SJan Medala 
21981173fca2SJan Medala 	for (completed = 0; completed < nb_pkts; completed++) {
2199ea93d37eSRafal Kozik 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
22001173fca2SJan Medala 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
22011173fca2SJan Medala 		ena_rx_ctx.descs = 0;
22027b3a3c4bSMaciej Bielski 		ena_rx_ctx.pkt_offset = 0;
22031173fca2SJan Medala 		/* receive packet context */
22041173fca2SJan Medala 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
22051173fca2SJan Medala 				    rx_ring->ena_com_io_sq,
22061173fca2SJan Medala 				    &ena_rx_ctx);
22071173fca2SJan Medala 		if (unlikely(rc)) {
22086f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc);
220905cffdcfSMichal Krawczyk 			if (rc == ENA_COM_NO_SPACE) {
221005cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_desc_num;
22119b260dbfSRafal Kozik 				rx_ring->adapter->reset_reason =
22129b260dbfSRafal Kozik 					ENA_REGS_RESET_TOO_MANY_RX_DESCS;
221305cffdcfSMichal Krawczyk 			} else {
221405cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_req_id;
221505cffdcfSMichal Krawczyk 				rx_ring->adapter->reset_reason =
221605cffdcfSMichal Krawczyk 					ENA_REGS_RESET_INV_RX_REQ_ID;
221705cffdcfSMichal Krawczyk 			}
2218241da076SRafal Kozik 			rx_ring->adapter->trigger_reset = true;
22191173fca2SJan Medala 			return 0;
22201173fca2SJan Medala 		}
22211173fca2SJan Medala 
22221be097dcSMichal Krawczyk 		mbuf = ena_rx_mbuf(rx_ring,
22231be097dcSMichal Krawczyk 			ena_rx_ctx.ena_bufs,
22241be097dcSMichal Krawczyk 			ena_rx_ctx.descs,
22251be097dcSMichal Krawczyk 			&next_to_clean,
22261be097dcSMichal Krawczyk 			ena_rx_ctx.pkt_offset);
22271be097dcSMichal Krawczyk 		if (unlikely(mbuf == NULL)) {
22281be097dcSMichal Krawczyk 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2229c0006061SMichal Krawczyk 				rx_ring->empty_rx_reqs[next_to_clean] =
22301be097dcSMichal Krawczyk 					rx_ring->ena_bufs[i].req_id;
2231c0006061SMichal Krawczyk 				next_to_clean = ENA_IDX_NEXT_MASKED(
2232c0006061SMichal Krawczyk 					next_to_clean, rx_ring->size_mask);
22331173fca2SJan Medala 			}
2234f00930d9SRafal Kozik 			break;
22351be097dcSMichal Krawczyk 		}
22361173fca2SJan Medala 
22371173fca2SJan Medala 		/* fill mbuf attributes if any */
22381be097dcSMichal Krawczyk 		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx);
22397830e905SSolganik Alexander 
22401be097dcSMichal Krawczyk 		if (unlikely(mbuf->ol_flags &
2241ef74b5f7SMichal Krawczyk 				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
2242ef74b5f7SMichal Krawczyk 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
22437830e905SSolganik Alexander 			++rx_ring->rx_stats.bad_csum;
2244ef74b5f7SMichal Krawczyk 		}
22457830e905SSolganik Alexander 
22461be097dcSMichal Krawczyk 		mbuf->hash.rss = ena_rx_ctx.hash;
22471173fca2SJan Medala 
22481be097dcSMichal Krawczyk 		rx_pkts[completed] = mbuf;
22491be097dcSMichal Krawczyk 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
22501173fca2SJan Medala 	}
22511173fca2SJan Medala 
22521be097dcSMichal Krawczyk 	rx_ring->rx_stats.cnt += completed;
2253ec78af6bSMichal Krawczyk 	rx_ring->next_to_clean = next_to_clean;
2254ec78af6bSMichal Krawczyk 
225577550607SMichal Krawczyk 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
225677550607SMichal Krawczyk 	refill_threshold =
2257c0006061SMichal Krawczyk 		RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
225877550607SMichal Krawczyk 		(unsigned int)ENA_REFILL_THRESH_PACKET);
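	/*
	 * Illustrative arithmetic, assuming the defaults of
	 * ENA_REFILL_THRESH_DIVIDER == 8 and ENA_REFILL_THRESH_PACKET == 256:
	 * a 1024-entry ring is refilled once more than
	 * RTE_MIN(1024 / 8, 256) == 128 descriptors are free, so a single
	 * doorbell is amortized over many buffers.
	 */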
225977550607SMichal Krawczyk 
22601173fca2SJan Medala 	/* Burst refill to save doorbells and memory barriers, at a constant interval */
226177550607SMichal Krawczyk 	if (free_queue_entries > refill_threshold) {
2262a45462c5SRafal Kozik 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
226377550607SMichal Krawczyk 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2264a45462c5SRafal Kozik 	}
22651173fca2SJan Medala 
22661be097dcSMichal Krawczyk 	return completed;
22671173fca2SJan Medala }
22681173fca2SJan Medala 
2269b3fc5a1aSKonstantin Ananyev static uint16_t
227083277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2271b3fc5a1aSKonstantin Ananyev 		uint16_t nb_pkts)
2272b3fc5a1aSKonstantin Ananyev {
2273b3fc5a1aSKonstantin Ananyev 	int32_t ret;
2274b3fc5a1aSKonstantin Ananyev 	uint32_t i;
2275b3fc5a1aSKonstantin Ananyev 	struct rte_mbuf *m;
227683277a7cSJakub Palider 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2277a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ip_hdr;
2278b3fc5a1aSKonstantin Ananyev 	uint64_t ol_flags;
227983277a7cSJakub Palider 	uint16_t frag_field;
228083277a7cSJakub Palider 
2281b3fc5a1aSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
2282b3fc5a1aSKonstantin Ananyev 		m = tx_pkts[i];
2283b3fc5a1aSKonstantin Ananyev 		ol_flags = m->ol_flags;
2284b3fc5a1aSKonstantin Ananyev 
2285bc5ef57dSMichal Krawczyk 		if (!(ol_flags & PKT_TX_IPV4))
2286bc5ef57dSMichal Krawczyk 			continue;
2287bc5ef57dSMichal Krawczyk 
2288bc5ef57dSMichal Krawczyk 		/* If the L2 header length was not specified, assume it is
2289bc5ef57dSMichal Krawczyk 		 * the length of the Ethernet header.
2290bc5ef57dSMichal Krawczyk 		 */
2291bc5ef57dSMichal Krawczyk 		if (unlikely(m->l2_len == 0))
22926d13ea8eSOlivier Matz 			m->l2_len = sizeof(struct rte_ether_hdr);
2293bc5ef57dSMichal Krawczyk 
2294a7c528e5SOlivier Matz 		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2295bc5ef57dSMichal Krawczyk 						 m->l2_len);
2296bc5ef57dSMichal Krawczyk 		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2297bc5ef57dSMichal Krawczyk 
229824ac604eSOlivier Matz 		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
2299bc5ef57dSMichal Krawczyk 			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2300bc5ef57dSMichal Krawczyk 
2301bc5ef57dSMichal Krawczyk 			/* If the IPv4 header has the DF flag set and TSO support
2302bc5ef57dSMichal Krawczyk 			 * is disabled, the partial checksum should not be calculated.
2303bc5ef57dSMichal Krawczyk 			 */
2304117ba4a6SMichal Krawczyk 			if (!tx_ring->adapter->offloads.tso4_supported)
2305bc5ef57dSMichal Krawczyk 				continue;
2306bc5ef57dSMichal Krawczyk 		}
2307bc5ef57dSMichal Krawczyk 
2308b3fc5a1aSKonstantin Ananyev 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
2309b3fc5a1aSKonstantin Ananyev 				(ol_flags & PKT_TX_L4_MASK) ==
2310b3fc5a1aSKonstantin Ananyev 				PKT_TX_SCTP_CKSUM) {
2311baeed5f4SMichal Krawczyk 			rte_errno = ENOTSUP;
2312b3fc5a1aSKonstantin Ananyev 			return i;
2313b3fc5a1aSKonstantin Ananyev 		}
2314b3fc5a1aSKonstantin Ananyev 
2315b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2316b3fc5a1aSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
2317b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2318baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2319b3fc5a1aSKonstantin Ananyev 			return i;
2320b3fc5a1aSKonstantin Ananyev 		}
2321b3fc5a1aSKonstantin Ananyev #endif
232283277a7cSJakub Palider 
232383277a7cSJakub Palider 		/* In case we are supposed to TSO and DF is not set (DF=0),
232483277a7cSJakub Palider 		 * the hardware must be provided with a partial checksum;
232583277a7cSJakub Palider 		 * otherwise, it will take care of the necessary calculations.
232683277a7cSJakub Palider 		 */
232783277a7cSJakub Palider 
2328b3fc5a1aSKonstantin Ananyev 		ret = rte_net_intel_cksum_flags_prepare(m,
2329b3fc5a1aSKonstantin Ananyev 			ol_flags & ~PKT_TX_TCP_SEG);
2330b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2331baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2332b3fc5a1aSKonstantin Ananyev 			return i;
2333b3fc5a1aSKonstantin Ananyev 		}
2334b3fc5a1aSKonstantin Ananyev 	}
2335b3fc5a1aSKonstantin Ananyev 
2336b3fc5a1aSKonstantin Ananyev 	return i;
2337b3fc5a1aSKonstantin Ananyev }
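/*
 * Application-side usage sketch (illustrative; port_id, queue_id and pkts
 * are application-provided): the callback above backs rte_eth_tx_prepare(),
 * which is typically paired with the transmit burst:
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */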
2338b3fc5a1aSKonstantin Ananyev 
2339f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter,
2340f01f060cSRafal Kozik 			     struct ena_admin_ena_hw_hints *hints)
2341f01f060cSRafal Kozik {
2342f01f060cSRafal Kozik 	if (hints->admin_completion_tx_timeout)
2343f01f060cSRafal Kozik 		adapter->ena_dev.admin_queue.completion_timeout =
2344f01f060cSRafal Kozik 			hints->admin_completion_tx_timeout * 1000;
2345f01f060cSRafal Kozik 
2346f01f060cSRafal Kozik 	if (hints->mmio_read_timeout)
2347f01f060cSRafal Kozik 		/* convert to usec */
2348f01f060cSRafal Kozik 		adapter->ena_dev.mmio_read.reg_read_to =
2349f01f060cSRafal Kozik 			hints->mmio_read_timeout * 1000;
2350d9b8b106SMichal Krawczyk 
2351d9b8b106SMichal Krawczyk 	if (hints->driver_watchdog_timeout) {
2352d9b8b106SMichal Krawczyk 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2353d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2354d9b8b106SMichal Krawczyk 		else
2355d9b8b106SMichal Krawczyk 			/* Convert msecs to ticks */
2356d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout =
2357d9b8b106SMichal Krawczyk 				(hints->driver_watchdog_timeout *
2358d9b8b106SMichal Krawczyk 				rte_get_timer_hz()) / 1000;
2359d9b8b106SMichal Krawczyk 	}
2360f01f060cSRafal Kozik }
2361f01f060cSRafal Kozik 
2362*8a90f3d8SIgor Chauskin static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
23632061fe41SRafal Kozik 					      struct rte_mbuf *mbuf)
23642061fe41SRafal Kozik {
23652fca2a98SMichal Krawczyk 	struct ena_com_dev *ena_dev;
23662fca2a98SMichal Krawczyk 	int num_segments, header_len, rc;
23672061fe41SRafal Kozik 
23682fca2a98SMichal Krawczyk 	ena_dev = &tx_ring->adapter->ena_dev;
23692061fe41SRafal Kozik 	num_segments = mbuf->nb_segs;
23702fca2a98SMichal Krawczyk 	header_len = mbuf->data_len;
23712061fe41SRafal Kozik 
23722061fe41SRafal Kozik 	if (likely(num_segments < tx_ring->sgl_size))
2373*8a90f3d8SIgor Chauskin 		goto checkspace;
23742061fe41SRafal Kozik 
23752fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
23762fca2a98SMichal Krawczyk 	    (num_segments == tx_ring->sgl_size) &&
23772fca2a98SMichal Krawczyk 	    (header_len < tx_ring->tx_max_header_size))
2378*8a90f3d8SIgor Chauskin 		goto checkspace;
23792fca2a98SMichal Krawczyk 
2380*8a90f3d8SIgor Chauskin 	/* Check for space for 2 additional descriptors: one for a possible
2381*8a90f3d8SIgor Chauskin 	 * header split and one for the metadata descriptor. Linearization
2382*8a90f3d8SIgor Chauskin 	 * will be performed, reducing the segment count from num_segments to 1.
2383*8a90f3d8SIgor Chauskin 	 */
2384*8a90f3d8SIgor Chauskin 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
2385*8a90f3d8SIgor Chauskin 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
2386*8a90f3d8SIgor Chauskin 		return ENA_COM_NO_MEM;
2387*8a90f3d8SIgor Chauskin 	}
23887830e905SSolganik Alexander 	++tx_ring->tx_stats.linearize;
23892061fe41SRafal Kozik 	rc = rte_pktmbuf_linearize(mbuf);
23907830e905SSolganik Alexander 	if (unlikely(rc)) {
23916f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
23927830e905SSolganik Alexander 		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
23937830e905SSolganik Alexander 		++tx_ring->tx_stats.linearize_failed;
23947830e905SSolganik Alexander 		return rc;
23957830e905SSolganik Alexander 	}
23962061fe41SRafal Kozik 
2397*8a90f3d8SIgor Chauskin 	return 0;
2398*8a90f3d8SIgor Chauskin 
2399*8a90f3d8SIgor Chauskin checkspace:
2400*8a90f3d8SIgor Chauskin 	/* Check for space for 2 additional descriptors: one for a possible
2401*8a90f3d8SIgor Chauskin 	 * header split and one for the metadata descriptor.
2402*8a90f3d8SIgor Chauskin 	 */
2403*8a90f3d8SIgor Chauskin 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2404*8a90f3d8SIgor Chauskin 					  num_segments + 2)) {
2405*8a90f3d8SIgor Chauskin 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
2406*8a90f3d8SIgor Chauskin 		return ENA_COM_NO_MEM;
2407*8a90f3d8SIgor Chauskin 	}
2408*8a90f3d8SIgor Chauskin 
2409*8a90f3d8SIgor Chauskin 	return 0;
24102061fe41SRafal Kozik }
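/*
 * Illustrative arithmetic for the checks above: a 5-segment mbuf must find
 * room for 5 + 2 == 7 descriptors (its segments plus the possible header
 * split and metadata descriptors), while the LLQ linearization path only
 * reserves 3, since rte_pktmbuf_linearize() collapses the chain to a
 * single segment.
 */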
24112061fe41SRafal Kozik 
241236278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
241336278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info,
241436278b82SMichal Krawczyk 	struct rte_mbuf *mbuf,
241536278b82SMichal Krawczyk 	void **push_header,
241636278b82SMichal Krawczyk 	uint16_t *header_len)
241736278b82SMichal Krawczyk {
241836278b82SMichal Krawczyk 	struct ena_com_buf *ena_buf;
241936278b82SMichal Krawczyk 	uint16_t delta, seg_len, push_len;
242036278b82SMichal Krawczyk 
242136278b82SMichal Krawczyk 	delta = 0;
242236278b82SMichal Krawczyk 	seg_len = mbuf->data_len;
242336278b82SMichal Krawczyk 
242436278b82SMichal Krawczyk 	tx_info->mbuf = mbuf;
242536278b82SMichal Krawczyk 	ena_buf = tx_info->bufs;
242636278b82SMichal Krawczyk 
242736278b82SMichal Krawczyk 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
242836278b82SMichal Krawczyk 		/*
242936278b82SMichal Krawczyk 		 * The Tx header might be (and in most cases will be) smaller
243036278b82SMichal Krawczyk 		 * than tx_max_header_size. It is not an issue to send more
243136278b82SMichal Krawczyk 		 * data to the device than is actually needed when the mbuf
243236278b82SMichal Krawczyk 		 * is larger than tx_max_header_size.
243336278b82SMichal Krawczyk 		 */
243436278b82SMichal Krawczyk 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
243536278b82SMichal Krawczyk 		*header_len = push_len;
243636278b82SMichal Krawczyk 
243736278b82SMichal Krawczyk 		if (likely(push_len <= seg_len)) {
243836278b82SMichal Krawczyk 			/* If the push header is in the single segment, then
243936278b82SMichal Krawczyk 			 * just point it to the 1st mbuf data.
244036278b82SMichal Krawczyk 			 */
244136278b82SMichal Krawczyk 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
244236278b82SMichal Krawczyk 		} else {
244336278b82SMichal Krawczyk 			/* If the push header spans several segments, copy
244436278b82SMichal Krawczyk 			 * it to the intermediate buffer.
244536278b82SMichal Krawczyk 			 */
244636278b82SMichal Krawczyk 			rte_pktmbuf_read(mbuf, 0, push_len,
244736278b82SMichal Krawczyk 				tx_ring->push_buf_intermediate_buf);
244836278b82SMichal Krawczyk 			*push_header = tx_ring->push_buf_intermediate_buf;
244936278b82SMichal Krawczyk 			delta = push_len - seg_len;
245036278b82SMichal Krawczyk 		}
245136278b82SMichal Krawczyk 	} else {
245236278b82SMichal Krawczyk 		*push_header = NULL;
245336278b82SMichal Krawczyk 		*header_len = 0;
245436278b82SMichal Krawczyk 		push_len = 0;
245536278b82SMichal Krawczyk 	}
245636278b82SMichal Krawczyk 
245736278b82SMichal Krawczyk 	/* Process the first segment, taking the pushed header into account */
245836278b82SMichal Krawczyk 	if (seg_len > push_len) {
245936278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova +
246036278b82SMichal Krawczyk 				mbuf->data_off +
246136278b82SMichal Krawczyk 				push_len;
246236278b82SMichal Krawczyk 		ena_buf->len = seg_len - push_len;
246336278b82SMichal Krawczyk 		ena_buf++;
246436278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
246536278b82SMichal Krawczyk 	}
246636278b82SMichal Krawczyk 
246736278b82SMichal Krawczyk 	while ((mbuf = mbuf->next) != NULL) {
246836278b82SMichal Krawczyk 		seg_len = mbuf->data_len;
246936278b82SMichal Krawczyk 
247036278b82SMichal Krawczyk 		/* Skip mbufs whose whole data was pushed as the header */
247136278b82SMichal Krawczyk 		if (unlikely(delta > seg_len)) {
247236278b82SMichal Krawczyk 			delta -= seg_len;
247336278b82SMichal Krawczyk 			continue;
247436278b82SMichal Krawczyk 		}
247536278b82SMichal Krawczyk 
247636278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
247736278b82SMichal Krawczyk 		ena_buf->len = seg_len - delta;
247836278b82SMichal Krawczyk 		ena_buf++;
247936278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
248036278b82SMichal Krawczyk 
248136278b82SMichal Krawczyk 		delta = 0;
248236278b82SMichal Krawczyk 	}
248336278b82SMichal Krawczyk }
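/*
 * Illustrative walk-through of the delta handling above (hypothetical
 * sizes, packet longer than the header): with tx_max_header_size == 96 and
 * a 64-byte first segment, push_len becomes 96, the header is copied out
 * via rte_pktmbuf_read() and delta == 32, so the second segment is mapped
 * starting 32 bytes in, with its ena_buf->len shortened by the same 32
 * bytes.
 */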
248436278b82SMichal Krawczyk 
248536278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
248636278b82SMichal Krawczyk {
248736278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info;
248836278b82SMichal Krawczyk 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
248936278b82SMichal Krawczyk 	uint16_t next_to_use;
249036278b82SMichal Krawczyk 	uint16_t header_len;
249136278b82SMichal Krawczyk 	uint16_t req_id;
249236278b82SMichal Krawczyk 	void *push_header;
249336278b82SMichal Krawczyk 	int nb_hw_desc;
249436278b82SMichal Krawczyk 	int rc;
249536278b82SMichal Krawczyk 
2496*8a90f3d8SIgor Chauskin 	rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf);
249736278b82SMichal Krawczyk 	if (unlikely(rc))
249836278b82SMichal Krawczyk 		return rc;
249936278b82SMichal Krawczyk 
250036278b82SMichal Krawczyk 	next_to_use = tx_ring->next_to_use;
250136278b82SMichal Krawczyk 
250236278b82SMichal Krawczyk 	req_id = tx_ring->empty_tx_reqs[next_to_use];
250336278b82SMichal Krawczyk 	tx_info = &tx_ring->tx_buffer_info[req_id];
250436278b82SMichal Krawczyk 	tx_info->num_of_bufs = 0;
250536278b82SMichal Krawczyk 
250636278b82SMichal Krawczyk 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
250736278b82SMichal Krawczyk 
250836278b82SMichal Krawczyk 	ena_tx_ctx.ena_bufs = tx_info->bufs;
250936278b82SMichal Krawczyk 	ena_tx_ctx.push_header = push_header;
251036278b82SMichal Krawczyk 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
251136278b82SMichal Krawczyk 	ena_tx_ctx.req_id = req_id;
251236278b82SMichal Krawczyk 	ena_tx_ctx.header_len = header_len;
251336278b82SMichal Krawczyk 
251436278b82SMichal Krawczyk 	/* Set Tx offloads flags, if applicable */
251536278b82SMichal Krawczyk 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
251636278b82SMichal Krawczyk 		tx_ring->disable_meta_caching);
251736278b82SMichal Krawczyk 
251836278b82SMichal Krawczyk 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
251936278b82SMichal Krawczyk 			&ena_tx_ctx))) {
252036278b82SMichal Krawczyk 		PMD_DRV_LOG(DEBUG,
252136278b82SMichal Krawczyk 			"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
252236278b82SMichal Krawczyk 			tx_ring->id);
252336278b82SMichal Krawczyk 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
25241f949ad9SAmit Bernstein 		tx_ring->tx_stats.doorbells++;
252536278b82SMichal Krawczyk 	}
252636278b82SMichal Krawczyk 
252736278b82SMichal Krawczyk 	/* Prepare the packet's descriptors for the DMA engine */
252836278b82SMichal Krawczyk 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,	&ena_tx_ctx,
252936278b82SMichal Krawczyk 		&nb_hw_desc);
253036278b82SMichal Krawczyk 	if (unlikely(rc)) {
253136278b82SMichal Krawczyk 		++tx_ring->tx_stats.prepare_ctx_err;
253236278b82SMichal Krawczyk 		return rc;
253336278b82SMichal Krawczyk 	}
253436278b82SMichal Krawczyk 
253536278b82SMichal Krawczyk 	tx_info->tx_descs = nb_hw_desc;
253636278b82SMichal Krawczyk 
253736278b82SMichal Krawczyk 	tx_ring->tx_stats.cnt++;
253836278b82SMichal Krawczyk 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
253936278b82SMichal Krawczyk 
254036278b82SMichal Krawczyk 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
254136278b82SMichal Krawczyk 		tx_ring->size_mask);
254236278b82SMichal Krawczyk 
254336278b82SMichal Krawczyk 	return 0;
254436278b82SMichal Krawczyk }
254536278b82SMichal Krawczyk 
254636278b82SMichal Krawczyk static void ena_tx_cleanup(struct ena_ring *tx_ring)
254736278b82SMichal Krawczyk {
254836278b82SMichal Krawczyk 	unsigned int cleanup_budget;
254936278b82SMichal Krawczyk 	unsigned int total_tx_descs = 0;
255036278b82SMichal Krawczyk 	uint16_t next_to_clean = tx_ring->next_to_clean;
255136278b82SMichal Krawczyk 
255236278b82SMichal Krawczyk 	cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
255336278b82SMichal Krawczyk 		(unsigned int)ENA_REFILL_THRESH_PACKET);
255436278b82SMichal Krawczyk 
255536278b82SMichal Krawczyk 	while (likely(total_tx_descs < cleanup_budget)) {
255636278b82SMichal Krawczyk 		struct rte_mbuf *mbuf;
255736278b82SMichal Krawczyk 		struct ena_tx_buffer *tx_info;
255836278b82SMichal Krawczyk 		uint16_t req_id;
255936278b82SMichal Krawczyk 
256036278b82SMichal Krawczyk 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
256136278b82SMichal Krawczyk 			break;
256236278b82SMichal Krawczyk 
256336278b82SMichal Krawczyk 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
256436278b82SMichal Krawczyk 			break;
256536278b82SMichal Krawczyk 
256636278b82SMichal Krawczyk 		/* Get Tx info & store how many descs were processed */
256736278b82SMichal Krawczyk 		tx_info = &tx_ring->tx_buffer_info[req_id];
256836278b82SMichal Krawczyk 
256936278b82SMichal Krawczyk 		mbuf = tx_info->mbuf;
257036278b82SMichal Krawczyk 		rte_pktmbuf_free(mbuf);
257136278b82SMichal Krawczyk 
257236278b82SMichal Krawczyk 		tx_info->mbuf = NULL;
257336278b82SMichal Krawczyk 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
257436278b82SMichal Krawczyk 
257536278b82SMichal Krawczyk 		total_tx_descs += tx_info->tx_descs;
257636278b82SMichal Krawczyk 
257736278b82SMichal Krawczyk 		/* Put back descriptor to the ring for reuse */
257836278b82SMichal Krawczyk 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
257936278b82SMichal Krawczyk 			tx_ring->size_mask);
258036278b82SMichal Krawczyk 	}
258136278b82SMichal Krawczyk 
258236278b82SMichal Krawczyk 	if (likely(total_tx_descs > 0)) {
258336278b82SMichal Krawczyk 		/* acknowledge completion of sent packets */
258436278b82SMichal Krawczyk 		tx_ring->next_to_clean = next_to_clean;
258536278b82SMichal Krawczyk 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
258636278b82SMichal Krawczyk 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
258736278b82SMichal Krawczyk 	}
258836278b82SMichal Krawczyk }
258936278b82SMichal Krawczyk 
25901173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
25911173fca2SJan Medala 				  uint16_t nb_pkts)
25921173fca2SJan Medala {
25931173fca2SJan Medala 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
259474456796SMichal Krawczyk 	uint16_t sent_idx = 0;
25951173fca2SJan Medala 
25961173fca2SJan Medala 	/* Check adapter state */
25971173fca2SJan Medala 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
25986f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
25991173fca2SJan Medala 			"Trying to xmit pkts while device is NOT running\n");
26001173fca2SJan Medala 		return 0;
26011173fca2SJan Medala 	}
26021173fca2SJan Medala 
26031173fca2SJan Medala 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
260436278b82SMichal Krawczyk 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
26052061fe41SRafal Kozik 			break;
26062061fe41SRafal Kozik 
260736278b82SMichal Krawczyk 		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
260836278b82SMichal Krawczyk 			tx_ring->size_mask)]);
26092fca2a98SMichal Krawczyk 	}
26102fca2a98SMichal Krawczyk 
26117830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2612b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
26131173fca2SJan Medala 
26145e02e19eSJan Medala 	/* If there are ready packets to be xmitted... */
26155e02e19eSJan Medala 	if (sent_idx > 0) {
26165e02e19eSJan Medala 		/* ...let HW do its best :-) */
26171173fca2SJan Medala 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
261845b6d861SMichal Krawczyk 		tx_ring->tx_stats.doorbells++;
26195e02e19eSJan Medala 	}
26205e02e19eSJan Medala 
262136278b82SMichal Krawczyk 	ena_tx_cleanup(tx_ring);
2622f7d82d24SRafal Kozik 
26237830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2624b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
26257830e905SSolganik Alexander 	tx_ring->tx_stats.tx_poll++;
26267830e905SSolganik Alexander 
26271173fca2SJan Medala 	return sent_idx;
26281173fca2SJan Medala }
26291173fca2SJan Medala 
263045718adaSMichal Krawczyk int ena_copy_eni_stats(struct ena_adapter *adapter)
263145718adaSMichal Krawczyk {
263245718adaSMichal Krawczyk 	struct ena_admin_eni_stats admin_eni_stats;
263345718adaSMichal Krawczyk 	int rc;
263445718adaSMichal Krawczyk 
263545718adaSMichal Krawczyk 	rte_spinlock_lock(&adapter->admin_lock);
263645718adaSMichal Krawczyk 	rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats);
263745718adaSMichal Krawczyk 	rte_spinlock_unlock(&adapter->admin_lock);
263845718adaSMichal Krawczyk 	if (rc != 0) {
263945718adaSMichal Krawczyk 		if (rc == ENA_COM_UNSUPPORTED) {
264045718adaSMichal Krawczyk 			PMD_DRV_LOG(DEBUG,
264145718adaSMichal Krawczyk 				"Retrieving ENI metrics is not supported.\n");
264245718adaSMichal Krawczyk 		} else {
264345718adaSMichal Krawczyk 			PMD_DRV_LOG(WARNING,
264445718adaSMichal Krawczyk 				"Failed to get ENI metrics: %d\n", rc);
264545718adaSMichal Krawczyk 		}
264645718adaSMichal Krawczyk 		return rc;
264745718adaSMichal Krawczyk 	}
264845718adaSMichal Krawczyk 
264945718adaSMichal Krawczyk 	rte_memcpy(&adapter->eni_stats, &admin_eni_stats,
265045718adaSMichal Krawczyk 		sizeof(struct ena_stats_eni));
265145718adaSMichal Krawczyk 
265245718adaSMichal Krawczyk 	return 0;
265345718adaSMichal Krawczyk }
265445718adaSMichal Krawczyk 
26557830e905SSolganik Alexander /**
26567830e905SSolganik Alexander  * DPDK callback to retrieve names of extended device statistics
26577830e905SSolganik Alexander  *
26587830e905SSolganik Alexander  * @param dev
26597830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
26607830e905SSolganik Alexander  * @param[out] xstats_names
26617830e905SSolganik Alexander  *   Buffer to insert names into.
26627830e905SSolganik Alexander  * @param n
26637830e905SSolganik Alexander  *   Number of names.
26647830e905SSolganik Alexander  *
26657830e905SSolganik Alexander  * @return
26667830e905SSolganik Alexander  *   Number of xstats names.
26677830e905SSolganik Alexander  */
26687830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
26697830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
26707830e905SSolganik Alexander 				unsigned int n)
26717830e905SSolganik Alexander {
26727830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
26737830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
26747830e905SSolganik Alexander 
26757830e905SSolganik Alexander 	if (n < xstats_count || !xstats_names)
26767830e905SSolganik Alexander 		return xstats_count;
26777830e905SSolganik Alexander 
26787830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
26797830e905SSolganik Alexander 		strcpy(xstats_names[count].name,
26807830e905SSolganik Alexander 			ena_stats_global_strings[stat].name);
26817830e905SSolganik Alexander 
268245718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++)
268345718adaSMichal Krawczyk 		strcpy(xstats_names[count].name,
268445718adaSMichal Krawczyk 			ena_stats_eni_strings[stat].name);
268545718adaSMichal Krawczyk 
26867830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
26877830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
26887830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
26897830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
26907830e905SSolganik Alexander 				"rx_q%d_%s", i,
26917830e905SSolganik Alexander 				ena_stats_rx_strings[stat].name);
26927830e905SSolganik Alexander 
26937830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
26947830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
26957830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
26967830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
26977830e905SSolganik Alexander 				"tx_q%d_%s", i,
26987830e905SSolganik Alexander 				ena_stats_tx_strings[stat].name);
26997830e905SSolganik Alexander 
27007830e905SSolganik Alexander 	return xstats_count;
27017830e905SSolganik Alexander }
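/*
 * Application-side usage sketch (illustrative; port_id is
 * application-provided): the callback above backs the generic ethdev API,
 * where a NULL buffer first queries the required count:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *	rte_eth_xstats_get_names(port_id, names, n);
 */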
27027830e905SSolganik Alexander 
27037830e905SSolganik Alexander /**
27047830e905SSolganik Alexander  * DPDK callback to get extended device statistics.
27057830e905SSolganik Alexander  *
27067830e905SSolganik Alexander  * @param dev
27077830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
27087830e905SSolganik Alexander  * @param[out] stats
27097830e905SSolganik Alexander  *   Stats table output buffer.
27107830e905SSolganik Alexander  * @param n
27117830e905SSolganik Alexander  *   The size of the stats table.
27127830e905SSolganik Alexander  *
27137830e905SSolganik Alexander  * @return
27147830e905SSolganik Alexander  *   Number of xstats on success, negative on failure.
27157830e905SSolganik Alexander  */
27167830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
27177830e905SSolganik Alexander 			  struct rte_eth_xstat *xstats,
27187830e905SSolganik Alexander 			  unsigned int n)
27197830e905SSolganik Alexander {
2720890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
27217830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
27227830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
27237830e905SSolganik Alexander 	int stat_offset;
27247830e905SSolganik Alexander 	void *stats_begin;
27257830e905SSolganik Alexander 
27267830e905SSolganik Alexander 	if (n < xstats_count)
27277830e905SSolganik Alexander 		return xstats_count;
27287830e905SSolganik Alexander 
27297830e905SSolganik Alexander 	if (!xstats)
27307830e905SSolganik Alexander 		return 0;
27317830e905SSolganik Alexander 
27327830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
2733493107fdSMichal Krawczyk 		stat_offset = ena_stats_global_strings[stat].stat_offset;
27347830e905SSolganik Alexander 		stats_begin = &adapter->dev_stats;
27357830e905SSolganik Alexander 
27367830e905SSolganik Alexander 		xstats[count].id = count;
27377830e905SSolganik Alexander 		xstats[count].value = *((uint64_t *)
27387830e905SSolganik Alexander 			((char *)stats_begin + stat_offset));
27397830e905SSolganik Alexander 	}
27407830e905SSolganik Alexander 
274145718adaSMichal Krawczyk 	/* Even if the function below fails, we should copy the previous (or
274245718adaSMichal Krawczyk 	 * initial) values to keep the structure of rte_eth_xstat consistent.
274345718adaSMichal Krawczyk 	 */
274445718adaSMichal Krawczyk 	ena_copy_eni_stats(adapter);
274545718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
274645718adaSMichal Krawczyk 		stat_offset = ena_stats_eni_strings[stat].stat_offset;
274745718adaSMichal Krawczyk 		stats_begin = &adapter->eni_stats;
274845718adaSMichal Krawczyk 
274945718adaSMichal Krawczyk 		xstats[count].id = count;
275045718adaSMichal Krawczyk 		xstats[count].value = *((uint64_t *)
275145718adaSMichal Krawczyk 		    ((char *)stats_begin + stat_offset));
275245718adaSMichal Krawczyk 	}
275345718adaSMichal Krawczyk 
27547830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
27557830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
27567830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
27577830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
27587830e905SSolganik Alexander 
27597830e905SSolganik Alexander 			xstats[count].id = count;
27607830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
27617830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
27627830e905SSolganik Alexander 		}
27637830e905SSolganik Alexander 	}
27647830e905SSolganik Alexander 
27657830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
27667830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
27677830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
27687830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
27697830e905SSolganik Alexander 
27707830e905SSolganik Alexander 			xstats[count].id = count;
27717830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
27727830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
27737830e905SSolganik Alexander 		}
27747830e905SSolganik Alexander 	}
27757830e905SSolganik Alexander 
27767830e905SSolganik Alexander 	return count;
27777830e905SSolganik Alexander }
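/*
 * The reads above are plain offset arithmetic: each stats structure is a
 * sequence of uint64_t counters, so a hypothetical stat_offset of 16
 * fetches the third counter, i.e. ((uint64_t *)stats_begin)[2].
 */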
27787830e905SSolganik Alexander 
27797830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
27807830e905SSolganik Alexander 				const uint64_t *ids,
27817830e905SSolganik Alexander 				uint64_t *values,
27827830e905SSolganik Alexander 				unsigned int n)
27837830e905SSolganik Alexander {
2784890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
27857830e905SSolganik Alexander 	uint64_t id;
27867830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
27877830e905SSolganik Alexander 	unsigned int i;
27887830e905SSolganik Alexander 	int qid;
27897830e905SSolganik Alexander 	int valid = 0;
279045718adaSMichal Krawczyk 	bool was_eni_copied = false;
279145718adaSMichal Krawczyk 
27927830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
27937830e905SSolganik Alexander 		id = ids[i];
27947830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
27957830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
27967830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
27977830e905SSolganik Alexander 			++valid;
27987830e905SSolganik Alexander 			continue;
27997830e905SSolganik Alexander 		}
28007830e905SSolganik Alexander 
280145718adaSMichal Krawczyk 		/* Check if id belongs to ENI statistics */
28027830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
280345718adaSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_ENI) {
280445718adaSMichal Krawczyk 			/* Avoid reading ENI stats multiple times in a single
280545718adaSMichal Krawczyk 			 * function call, as it requires communication with the
280645718adaSMichal Krawczyk 			 * admin queue.
280745718adaSMichal Krawczyk 			 */
280845718adaSMichal Krawczyk 			if (!was_eni_copied) {
280945718adaSMichal Krawczyk 				was_eni_copied = true;
281045718adaSMichal Krawczyk 				ena_copy_eni_stats(adapter);
281145718adaSMichal Krawczyk 			}
281245718adaSMichal Krawczyk 			values[i] = *((uint64_t *)&adapter->eni_stats + id);
281345718adaSMichal Krawczyk 			++valid;
281445718adaSMichal Krawczyk 			continue;
281545718adaSMichal Krawczyk 		}
281645718adaSMichal Krawczyk 
281745718adaSMichal Krawczyk 		/* Check if id belongs to rx queue statistics */
281845718adaSMichal Krawczyk 		id -= ENA_STATS_ARRAY_ENI;
28197830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
28207830e905SSolganik Alexander 		if (id < rx_entries) {
28217830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
28227830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
28237830e905SSolganik Alexander 			values[i] = *((uint64_t *)
28247830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
28257830e905SSolganik Alexander 			++valid;
28267830e905SSolganik Alexander 			continue;
28277830e905SSolganik Alexander 		}
28287830e905SSolganik Alexander 		/* Check if id belongs to tx queue statistics */
28297830e905SSolganik Alexander 		id -= rx_entries;
28307830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
28317830e905SSolganik Alexander 		if (id < tx_entries) {
28327830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
28337830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
28347830e905SSolganik Alexander 			values[i] = *((uint64_t *)
28357830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
28367830e905SSolganik Alexander 			++valid;
28377830e905SSolganik Alexander 			continue;
28387830e905SSolganik Alexander 		}
28397830e905SSolganik Alexander 	}
28407830e905SSolganik Alexander 
28417830e905SSolganik Alexander 	return valid;
28427830e905SSolganik Alexander }
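/*
 * Illustrative id layout assumed by the lookup above (actual counts are
 * configuration-dependent): global stats first, then ENI stats, then Rx
 * stats laid out as stat_index * nb_rx_queues + qid, then Tx stats
 * likewise. E.g. with 4 Rx queues, Rx statistic 1 of queue 2 has id
 * ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI + 1 * 4 + 2.
 */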
28437830e905SSolganik Alexander 
28448a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key,
28458a7a73f2SMichal Krawczyk 				   const char *value,
28468a7a73f2SMichal Krawczyk 				   void *opaque)
28478a7a73f2SMichal Krawczyk {
28488a7a73f2SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
28498a7a73f2SMichal Krawczyk 	bool bool_value;
28508a7a73f2SMichal Krawczyk 
28518a7a73f2SMichal Krawczyk 	/* Parse the value. */
28528a7a73f2SMichal Krawczyk 	if (strcmp(value, "1") == 0) {
28538a7a73f2SMichal Krawczyk 		bool_value = true;
28548a7a73f2SMichal Krawczyk 	} else if (strcmp(value, "0") == 0) {
28558a7a73f2SMichal Krawczyk 		bool_value = false;
28568a7a73f2SMichal Krawczyk 	} else {
28578a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR,
28588a7a73f2SMichal Krawczyk 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
28598a7a73f2SMichal Krawczyk 			value, key);
28608a7a73f2SMichal Krawczyk 		return -EINVAL;
28618a7a73f2SMichal Krawczyk 	}
28628a7a73f2SMichal Krawczyk 
28638a7a73f2SMichal Krawczyk 	/* Now, assign it to the proper adapter field. */
28648a7a73f2SMichal Krawczyk 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
28658a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr = bool_value;
28668a7a73f2SMichal Krawczyk 
28678a7a73f2SMichal Krawczyk 	return 0;
28688a7a73f2SMichal Krawczyk }
28698a7a73f2SMichal Krawczyk 
28708a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter,
28718a7a73f2SMichal Krawczyk 			     struct rte_devargs *devargs)
28728a7a73f2SMichal Krawczyk {
28738a7a73f2SMichal Krawczyk 	static const char * const allowed_args[] = {
28748a7a73f2SMichal Krawczyk 		ENA_DEVARG_LARGE_LLQ_HDR,
28758a7a73f2SMichal Krawczyk 	};
28768a7a73f2SMichal Krawczyk 	struct rte_kvargs *kvlist;
28778a7a73f2SMichal Krawczyk 	int rc;
28788a7a73f2SMichal Krawczyk 
28798a7a73f2SMichal Krawczyk 	if (devargs == NULL)
28808a7a73f2SMichal Krawczyk 		return 0;
28818a7a73f2SMichal Krawczyk 
28828a7a73f2SMichal Krawczyk 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
28838a7a73f2SMichal Krawczyk 	if (kvlist == NULL) {
28848a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
28858a7a73f2SMichal Krawczyk 			devargs->args);
28868a7a73f2SMichal Krawczyk 		return -EINVAL;
28878a7a73f2SMichal Krawczyk 	}
28888a7a73f2SMichal Krawczyk 
28898a7a73f2SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
28908a7a73f2SMichal Krawczyk 		ena_process_bool_devarg, adapter);
28918a7a73f2SMichal Krawczyk 
28928a7a73f2SMichal Krawczyk 	rte_kvargs_free(kvlist);
28938a7a73f2SMichal Krawczyk 
28948a7a73f2SMichal Krawczyk 	return rc;
28958a7a73f2SMichal Krawczyk }
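/*
 * Usage sketch (illustrative PCI address): the devarg parsed above is
 * supplied on the EAL command line, e.g.
 *
 *	dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i
 *
 * which makes rte_kvargs_process() call ena_process_bool_devarg() with
 * key "large_llq_hdr" and value "1".
 */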
28968a7a73f2SMichal Krawczyk 
2897ca148440SMichal Krawczyk /*********************************************************************
2898ca148440SMichal Krawczyk  *  PMD configuration
2899ca148440SMichal Krawczyk  *********************************************************************/
2900fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2901fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
2902fdf91e0fSJan Blunck {
2903fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
2904fdf91e0fSJan Blunck 		sizeof(struct ena_adapter), eth_ena_dev_init);
2905fdf91e0fSJan Blunck }
2906fdf91e0fSJan Blunck 
2907fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
2908fdf91e0fSJan Blunck {
2909eb0ef49dSMichal Krawczyk 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
2910fdf91e0fSJan Blunck }
2911fdf91e0fSJan Blunck 
2912fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = {
29131173fca2SJan Medala 	.id_table = pci_id_ena_map,
291405e0eee0SRafal Kozik 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
291505e0eee0SRafal Kozik 		     RTE_PCI_DRV_WC_ACTIVATE,
2916fdf91e0fSJan Blunck 	.probe = eth_ena_pci_probe,
2917fdf91e0fSJan Blunck 	.remove = eth_ena_pci_remove,
29181173fca2SJan Medala };
29191173fca2SJan Medala 
2920fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
292101f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
292206e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
29238a7a73f2SMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");
29249c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_init, pmd.net.ena.init, NOTICE);
29259c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_driver, pmd.net.ena.driver, NOTICE);
29266f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_RX
29279c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_rx, pmd.net.ena.rx, NOTICE);
29286f1c9df9SStephen Hemminger #endif
29296f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX
29309c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_tx, pmd.net.ena.tx, NOTICE);
29316f1c9df9SStephen Hemminger #endif
29326f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
29339c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_tx_free, pmd.net.ena.tx_free, NOTICE);
29346f1c9df9SStephen Hemminger #endif
29356f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_COM_DEBUG
29369c99878aSJerin Jacob RTE_LOG_REGISTER(ena_logtype_com, pmd.net.ena.com, NOTICE);
29376f1c9df9SStephen Hemminger #endif
29383adcba9aSMichal Krawczyk 
29393adcba9aSMichal Krawczyk /******************************************************************************
29403adcba9aSMichal Krawczyk  ******************************** AENQ Handlers *******************************
29413adcba9aSMichal Krawczyk  *****************************************************************************/
2942ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data,
2943ca148440SMichal Krawczyk 				      struct ena_admin_aenq_entry *aenq_e)
2944ca148440SMichal Krawczyk {
2945ca148440SMichal Krawczyk 	struct rte_eth_dev *eth_dev;
2946ca148440SMichal Krawczyk 	struct ena_adapter *adapter;
2947ca148440SMichal Krawczyk 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
2948ca148440SMichal Krawczyk 	uint32_t status;
2949ca148440SMichal Krawczyk 
2950890728ffSStephen Hemminger 	adapter = adapter_data;
2951ca148440SMichal Krawczyk 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
2952ca148440SMichal Krawczyk 	eth_dev = adapter->rte_dev;
2953ca148440SMichal Krawczyk 
2954ca148440SMichal Krawczyk 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
2955ca148440SMichal Krawczyk 	adapter->link_status = status;
2956ca148440SMichal Krawczyk 
2957ca148440SMichal Krawczyk 	ena_link_update(eth_dev, 0);
29585723fbedSFerruh Yigit 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2959ca148440SMichal Krawczyk }
2960ca148440SMichal Krawczyk 
2961f01f060cSRafal Kozik static void ena_notification(void *data,
2962f01f060cSRafal Kozik 			     struct ena_admin_aenq_entry *aenq_e)
2963f01f060cSRafal Kozik {
2964890728ffSStephen Hemminger 	struct ena_adapter *adapter = data;
2965f01f060cSRafal Kozik 	struct ena_admin_ena_hw_hints *hints;
2966f01f060cSRafal Kozik 
2967f01f060cSRafal Kozik 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
29686f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n",
2969f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.group,
2970f01f060cSRafal Kozik 			ENA_ADMIN_NOTIFICATION);
2971f01f060cSRafal Kozik 
2972f01f060cSRafal Kozik 	switch (aenq_e->aenq_common_desc.syndrom) {
2973f01f060cSRafal Kozik 	case ENA_ADMIN_UPDATE_HINTS:
2974f01f060cSRafal Kozik 		hints = (struct ena_admin_ena_hw_hints *)
2975f01f060cSRafal Kozik 			(&aenq_e->inline_data_w4);
2976f01f060cSRafal Kozik 		ena_update_hints(adapter, hints);
2977f01f060cSRafal Kozik 		break;
2978f01f060cSRafal Kozik 	default:
29796f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome %d\n",
2980f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.syndrom);
2981f01f060cSRafal Kozik 	}
2982f01f060cSRafal Kozik }
2983f01f060cSRafal Kozik 
2984d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
2985d9b8b106SMichal Krawczyk 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
2986d9b8b106SMichal Krawczyk {
2987890728ffSStephen Hemminger 	struct ena_adapter *adapter = adapter_data;
298894c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
298994c3e376SRafal Kozik 	uint64_t rx_drops;
2990e1e73e32SMichal Krawczyk 	uint64_t tx_drops;
2991d9b8b106SMichal Krawczyk 
2992d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
299394c3e376SRafal Kozik 
299494c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
299594c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2996e1e73e32SMichal Krawczyk 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2997e1e73e32SMichal Krawczyk 
2998e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = rx_drops;
2999e1e73e32SMichal Krawczyk 	adapter->dev_stats.tx_drops = tx_drops;
3000d9b8b106SMichal Krawczyk }
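/*
 * Worked example of the 64-bit merges above: rx_drops_high == 1 and
 * rx_drops_low == 2 combine to ((1ULL << 32) | 2) == 4294967298 drops.
 */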
3001d9b8b106SMichal Krawczyk 
30023adcba9aSMichal Krawczyk /**
30033adcba9aSMichal Krawczyk  * This handler will be called for an unknown event group or unimplemented handlers
30043adcba9aSMichal Krawczyk  **/
30053adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
30063adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
30073adcba9aSMichal Krawczyk {
30086f1c9df9SStephen Hemminger 	PMD_DRV_LOG(ERR, "Unknown event was received or the event has an "
3009983cce2dSRafal Kozik 			  "unimplemented handler\n");
30103adcba9aSMichal Krawczyk }
30113adcba9aSMichal Krawczyk 
3012ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
30133adcba9aSMichal Krawczyk 	.handlers = {
3014ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3015f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3016d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
30173adcba9aSMichal Krawczyk 	},
30183adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
30193adcba9aSMichal Krawczyk };
3020