xref: /dpdk/drivers/net/ena/ena_ethdev.c (revision b19f366cc9cb91a38710813a7d0078c46e67ff55)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	2
#define DRV_MODULE_VER_SUBMINOR	1

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)
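
/*
 * Illustrative mapping (follows from the macros above): ethdev queue 0
 * uses device IO queues 0 (Tx) and 1 (Rx), queue 1 uses 2 (Tx) and
 * 3 (Rx); Tx and Rx submission queues are interleaved on the device.
 */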

#define __MERGE_64B_H_L(h, l) (((uint64_t)(h) << 32) | (l))
#define TEST_BIT(val, bit_shift) ((val) & (1UL << (bit_shift)))
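
/*
 * Illustrative values (not taken from the driver):
 *   __MERGE_64B_H_L(0x1, 0x2) == 0x100000002
 *   TEST_BIT(0x5, 2) != 0 (bit 2 of 0b101 is set)
 */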

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
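
/*
 * Note: data_off in struct rte_tcp_hdr carries the TCP data-offset
 * field in its upper 4 bits (header length in 32-bit words), so the
 * shift by 4 extracts that word count.
 */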

#define ENA_RX_RSS_TABLE_LOG_SIZE  7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST             = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)
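
/*
 * For illustration, ENA_STAT_TX_ENTRY(cnt) expands to:
 *   { .name = "cnt", .stat_offset = offsetof(struct ena_stats_tx, cnt) }
 */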

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append the
 * counter to the name.
 */
rte_atomic32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define	ENA_TX_OFFLOAD_MASK	(\
	PKT_TX_L4_MASK |         \
	PKT_TX_IPV6 |            \
	PKT_TX_IPV4 |            \
	PKT_TX_IP_CKSUM |        \
	PKT_TX_TCP_SEG)

#define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
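
/*
 * The XOR with PKT_TX_OFFLOAD_MASK leaves exactly the Tx offload bits
 * this PMD does not support, which lets eth_ena_prep_pkts() reject
 * mbufs requesting unsupported offloads.
 */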

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static void ena_tx_cleanup(struct ena_ring *tx_ring);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			      enum ena_ring_type ring_type);
static int ena_queue_start(struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static int ena_copy_eni_stats(struct ena_adapter *adapter);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure        = ena_dev_configure,
	.dev_infos_get        = ena_infos_get,
	.rx_queue_setup       = ena_rx_queue_setup,
	.tx_queue_setup       = ena_tx_queue_setup,
	.dev_start            = ena_start,
	.dev_stop             = ena_stop,
	.link_update          = ena_link_update,
	.stats_get            = ena_stats_get,
	.xstats_get_names     = ena_xstats_get_names,
	.xstats_get	      = ena_xstats_get,
	.xstats_get_by_id     = ena_xstats_get_by_id,
	.mtu_set              = ena_mtu_set,
	.rx_queue_release     = ena_rx_queue_release,
	.tx_queue_release     = ena_tx_queue_release,
	.dev_close            = ena_close,
	.dev_reset            = ena_dev_reset,
	.reta_update          = ena_rss_reta_update,
	.reta_query           = ena_rss_reta_query,
};

void ena_rss_key_fill(void *key, size_t size)
{
	static bool key_generated;
	static uint8_t default_key[ENA_HASH_KEY_SIZE];
	size_t i;

	RTE_ASSERT(size <= ENA_HASH_KEY_SIZE);

	if (!key_generated) {
		for (i = 0; i < ENA_HASH_KEY_SIZE; ++i)
			default_key[i] = rte_rand() & 0xff;
		key_generated = true;
	}

	rte_memcpy(key, default_key, size);
}
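
/*
 * Note: the default key above is generated once per process and then
 * reused, so all ENA ports in a process share the same RSS hash key.
 */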

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err))
			ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	else
		if (unlikely(ena_rx_ctx->l4_csum_err))
			ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_UDP_CKSUM) &&
				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n");
	else
		PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}
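
/*
 * Note: validate_tx_req_id() only marks the adapter for reset
 * (trigger_reset) and returns -EFAULT; the actual device reset happens
 * later, e.g. in ena_stop() via ena_com_dev_reset().
 */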

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
{
	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
		(dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}
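
/*
 * Worked example: with 2 Rx and 2 Tx queues configured this yields
 * 4 global + 5 ENI + 2 * 9 Tx + 2 * 7 Rx = 41 xstats.
 */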

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->rte_dev);

	/* Allocate 32 bytes for each string and 64 bits for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     adapter);

	/*
	 * MAC is not allocated dynamically. Setting it to NULL should
	 * prevent the release of the resource in
	 * rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device");

	return rc;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		PMD_DRV_LOG(WARNING,
			"Requested indirection table size (%d) is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* Each reta_conf holds 64 entries; two of them are used
		 * to cover the 128-entry table.
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       entry_value);
			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
				PMD_DRV_LOG(ERR,
					"Cannot fill indirect table\n");
				return rc;
			}
		}
	}

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_set(ena_dev);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		return rc;
	}

	PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}
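
/*
 * For illustration: mapping RETA entry 10 to ethdev Rx queue 2 stores
 * ENA_IO_RXQ_IDX(2) == 5 in the device table; ena_rss_reta_query()
 * undoes the encoding with ENA_IO_RXQ_IDX_REV().
 */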

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
			PMD_DRV_LOG(ERR, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}
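
/*
 * Example of the default spread: with 4 Rx queues, the 128 table
 * entries are filled round-robin (queues 0, 1, 2, 3, 0, 1, ...) and
 * CRC32 is used as the hash function with 0xFFFFFFFF as initial value.
 */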

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf) {
			rte_pktmbuf_free(tx_buf->mbuf);
			tx_buf->mbuf = NULL;
		}
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_start(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "Failed to start queue %d (type %d)",
					     i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
				  "Max MTU: %d, min MTU: %d",
			     max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

	if (use_large_llq_hdr) {
		if ((llq->entry_size_ctrl_supported &
		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    (ena_dev->tx_mem_queue_type ==
		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
			max_tx_queue_size /= 2;
			PMD_INIT_LOG(INFO,
				"Forcing large headers and decreasing maximum TX queue size to %d\n",
				max_tx_queue_size);
		} else {
			PMD_INIT_LOG(ERR,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;

	return 0;
}
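
/*
 * Worked example (hypothetical device limits): in LLQ mode with
 * max_tx_cq_depth == 1024 and llq->max_llq_depth == 512, the Tx queue
 * size is capped at 512; forcing large (256 B) LLQ headers halves it
 * again, to 256.
 */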

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	adapter->drv_stats->rx_drops = 0;
}

static int ena_stats_get(struct rte_eth_dev *dev,
			  struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);

	/* Driver related stats */
	stats->imissed = adapter->drv_stats->rx_drops;
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);

	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;

		stats->q_ibytes[i] = rx_stats->bytes;
		stats->q_ipackets[i] = rx_stats->cnt;
		stats->q_errors[i] = rx_stats->bad_desc_num +
			rx_stats->bad_req_id;
	}

	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;

		stats->q_obytes[i] = tx_stats->bytes;
		stats->q_opackets[i] = tx_stats->cnt;
	}

	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
	adapter = dev->data->dev_private;

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");

	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
		PMD_DRV_LOG(ERR,
			"Invalid MTU setting. New MTU: %d, "
			"max MTU: %d, min MTU: %d\n",
			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
1036241da076SRafal Kozik 		return -EINVAL;
10371173fca2SJan Medala 	}
10381173fca2SJan Medala 
10391173fca2SJan Medala 	rc = ena_com_set_dev_mtu(ena_dev, mtu);
10401173fca2SJan Medala 	if (rc)
10416f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
10421173fca2SJan Medala 	else
10436f1c9df9SStephen Hemminger 		PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu);
10441173fca2SJan Medala 
10451173fca2SJan Medala 	return rc;
10461173fca2SJan Medala }
10471173fca2SJan Medala 
10481173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev)
10491173fca2SJan Medala {
1050890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1051d9b8b106SMichal Krawczyk 	uint64_t ticks;
10521173fca2SJan Medala 	int rc = 0;
10531173fca2SJan Medala 
10541173fca2SJan Medala 	rc = ena_check_valid_conf(adapter);
10551173fca2SJan Medala 	if (rc)
10561173fca2SJan Medala 		return rc;
10571173fca2SJan Medala 
105826e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
10591173fca2SJan Medala 	if (rc)
10601173fca2SJan Medala 		return rc;
10611173fca2SJan Medala 
106226e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
10631173fca2SJan Medala 	if (rc)
106426e5543dSRafal Kozik 		goto err_start_tx;
10651173fca2SJan Medala 
10661173fca2SJan Medala 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
1067361913adSDaria Kolistratova 	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
10681173fca2SJan Medala 		rc = ena_rss_init_default(adapter);
10691173fca2SJan Medala 		if (rc)
107026e5543dSRafal Kozik 			goto err_rss_init;
10711173fca2SJan Medala 	}
10721173fca2SJan Medala 
10731173fca2SJan Medala 	ena_stats_restart(dev);
10741173fca2SJan Medala 
1075d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
1076d9b8b106SMichal Krawczyk 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1077d9b8b106SMichal Krawczyk 
1078d9b8b106SMichal Krawczyk 	ticks = rte_get_timer_hz();
1079d9b8b106SMichal Krawczyk 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1080d9b8b106SMichal Krawczyk 			ena_timer_wd_callback, adapter);
1081d9b8b106SMichal Krawczyk 
10827830e905SSolganik Alexander 	++adapter->dev_stats.dev_start;
10831173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
10841173fca2SJan Medala 
10851173fca2SJan Medala 	return 0;
108626e5543dSRafal Kozik 
108726e5543dSRafal Kozik err_rss_init:
108826e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
108926e5543dSRafal Kozik err_start_tx:
109026e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
109126e5543dSRafal Kozik 	return rc;
10921173fca2SJan Medala }
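
/*
 * The watchdog armed above is a PERIODICAL rte_timer, so it only fires when
 * some lcore services the timer subsystem. A minimal sketch of what an
 * application is expected to run in its main loop (function name is
 * illustrative):
 */
static __rte_unused void
ena_example_service_timers(void)
{
	/* Executes expired timers, including ena_timer_wd_callback(). */
	rte_timer_manage();
}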
10931173fca2SJan Medala 
109462024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev)
1095eb0ef49dSMichal Krawczyk {
1096890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1097e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1098e457bc70SRafal Kozik 	int rc;
1099eb0ef49dSMichal Krawczyk 
1100d9b8b106SMichal Krawczyk 	rte_timer_stop_sync(&adapter->timer_wd);
110126e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
110226e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1103d9b8b106SMichal Krawczyk 
1104e457bc70SRafal Kozik 	if (adapter->trigger_reset) {
1105e457bc70SRafal Kozik 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1106e457bc70SRafal Kozik 		if (rc)
11076f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc);
1108e457bc70SRafal Kozik 	}
1109e457bc70SRafal Kozik 
11107830e905SSolganik Alexander 	++adapter->dev_stats.dev_stop;
1111eb0ef49dSMichal Krawczyk 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1112b8f5d2aeSThomas Monjalon 	dev->data->dev_started = 0;
111362024eb8SIvan Ilchenko 
111462024eb8SIvan Ilchenko 	return 0;
1115eb0ef49dSMichal Krawczyk }
1116eb0ef49dSMichal Krawczyk 
1117df238f84SMichal Krawczyk static int ena_create_io_queue(struct ena_ring *ring)
1118df238f84SMichal Krawczyk {
1119df238f84SMichal Krawczyk 	struct ena_adapter *adapter;
1120df238f84SMichal Krawczyk 	struct ena_com_dev *ena_dev;
1121df238f84SMichal Krawczyk 	struct ena_com_create_io_ctx ctx =
1122df238f84SMichal Krawczyk 		/* policy set to _HOST just to satisfy icc compiler */
1123df238f84SMichal Krawczyk 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1124df238f84SMichal Krawczyk 		  0, 0, 0, 0, 0 };
1125df238f84SMichal Krawczyk 	uint16_t ena_qid;
1126778677dcSRafal Kozik 	unsigned int i;
1127df238f84SMichal Krawczyk 	int rc;
1128df238f84SMichal Krawczyk 
1129df238f84SMichal Krawczyk 	adapter = ring->adapter;
1130df238f84SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1131df238f84SMichal Krawczyk 
1132df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX) {
1133df238f84SMichal Krawczyk 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1134df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1135df238f84SMichal Krawczyk 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1136778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1137778677dcSRafal Kozik 			ring->empty_tx_reqs[i] = i;
1138df238f84SMichal Krawczyk 	} else {
1139df238f84SMichal Krawczyk 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1140df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1141778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1142778677dcSRafal Kozik 			ring->empty_rx_reqs[i] = i;
1143df238f84SMichal Krawczyk 	}
1144badc3a6aSMichal Krawczyk 	ctx.queue_size = ring->ring_size;
1145df238f84SMichal Krawczyk 	ctx.qid = ena_qid;
1146df238f84SMichal Krawczyk 	ctx.msix_vector = -1; /* interrupts not used */
11474217cb0bSMichal Krawczyk 	ctx.numa_node = ring->numa_socket_id;
1148df238f84SMichal Krawczyk 
1149df238f84SMichal Krawczyk 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1150df238f84SMichal Krawczyk 	if (rc) {
11516f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1152df238f84SMichal Krawczyk 			"failed to create io queue #%d (qid:%d) rc: %d\n",
1153df238f84SMichal Krawczyk 			ring->id, ena_qid, rc);
1154df238f84SMichal Krawczyk 		return rc;
1155df238f84SMichal Krawczyk 	}
1156df238f84SMichal Krawczyk 
1157df238f84SMichal Krawczyk 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1158df238f84SMichal Krawczyk 				     &ring->ena_com_io_sq,
1159df238f84SMichal Krawczyk 				     &ring->ena_com_io_cq);
1160df238f84SMichal Krawczyk 	if (rc) {
11616f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1162df238f84SMichal Krawczyk 			"Failed to get io queue handlers. queue num %d rc: %d\n",
1163df238f84SMichal Krawczyk 			ring->id, rc);
1164df238f84SMichal Krawczyk 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1165df238f84SMichal Krawczyk 		return rc;
1166df238f84SMichal Krawczyk 	}
1167df238f84SMichal Krawczyk 
1168df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX)
1169df238f84SMichal Krawczyk 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1170df238f84SMichal Krawczyk 
1171df238f84SMichal Krawczyk 	return 0;
1172df238f84SMichal Krawczyk }
1173df238f84SMichal Krawczyk 
117426e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring)
1175df238f84SMichal Krawczyk {
117626e5543dSRafal Kozik 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1177df238f84SMichal Krawczyk 
117826e5543dSRafal Kozik 	if (ring->type == ENA_RING_TYPE_RX) {
117926e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
118026e5543dSRafal Kozik 		ena_rx_queue_release_bufs(ring);
118126e5543dSRafal Kozik 	} else {
118226e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
118326e5543dSRafal Kozik 		ena_tx_queue_release_bufs(ring);
1184df238f84SMichal Krawczyk 	}
1185df238f84SMichal Krawczyk }
1186df238f84SMichal Krawczyk 
118726e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev,
118826e5543dSRafal Kozik 			      enum ena_ring_type ring_type)
118926e5543dSRafal Kozik {
1190890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
119126e5543dSRafal Kozik 	struct ena_ring *queues = NULL;
119226e5543dSRafal Kozik 	uint16_t nb_queues, i;
119326e5543dSRafal Kozik 
119426e5543dSRafal Kozik 	if (ring_type == ENA_RING_TYPE_RX) {
119526e5543dSRafal Kozik 		queues = adapter->rx_ring;
119626e5543dSRafal Kozik 		nb_queues = dev->data->nb_rx_queues;
119726e5543dSRafal Kozik 	} else {
119826e5543dSRafal Kozik 		queues = adapter->tx_ring;
119926e5543dSRafal Kozik 		nb_queues = dev->data->nb_tx_queues;
120026e5543dSRafal Kozik 	}
120126e5543dSRafal Kozik 
120226e5543dSRafal Kozik 	for (i = 0; i < nb_queues; ++i)
120326e5543dSRafal Kozik 		if (queues[i].configured)
120426e5543dSRafal Kozik 			ena_queue_stop(&queues[i]);
120526e5543dSRafal Kozik }
120626e5543dSRafal Kozik 
120726e5543dSRafal Kozik static int ena_queue_start(struct ena_ring *ring)
12081173fca2SJan Medala {
1209a467e8f3SMichal Krawczyk 	int rc, bufs_num;
12101173fca2SJan Medala 
12111173fca2SJan Medala 	ena_assert_msg(ring->configured == 1,
121226e5543dSRafal Kozik 		       "Trying to start unconfigured queue\n");
12131173fca2SJan Medala 
1214df238f84SMichal Krawczyk 	rc = ena_create_io_queue(ring);
1215df238f84SMichal Krawczyk 	if (rc) {
1216498c687aSRafal Kozik 		PMD_INIT_LOG(ERR, "Failed to create IO queue!");
1217df238f84SMichal Krawczyk 		return rc;
1218df238f84SMichal Krawczyk 	}
1219df238f84SMichal Krawczyk 
12201173fca2SJan Medala 	ring->next_to_clean = 0;
12211173fca2SJan Medala 	ring->next_to_use = 0;
12221173fca2SJan Medala 
12237830e905SSolganik Alexander 	if (ring->type == ENA_RING_TYPE_TX) {
12247830e905SSolganik Alexander 		ring->tx_stats.available_desc =
1225b2b02edeSMichal Krawczyk 			ena_com_free_q_entries(ring->ena_com_io_sq);
12261173fca2SJan Medala 		return 0;
12277830e905SSolganik Alexander 	}
12281173fca2SJan Medala 
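	/* Refill all but one descriptor, so the RX ring is never completely
	 * full.
	 */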
1229a467e8f3SMichal Krawczyk 	bufs_num = ring->ring_size - 1;
1230a467e8f3SMichal Krawczyk 	rc = ena_populate_rx_queue(ring, bufs_num);
1231a467e8f3SMichal Krawczyk 	if (rc != bufs_num) {
123226e5543dSRafal Kozik 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
123326e5543dSRafal Kozik 					 ENA_IO_RXQ_IDX(ring->id));
1234f2462150SFerruh Yigit 		PMD_INIT_LOG(ERR, "Failed to populate RX ring!");
1235241da076SRafal Kozik 		return ENA_COM_FAULT;
12361173fca2SJan Medala 	}
12374387e81cSIdo Segev 	/* Flush the per-core RX buffer pool caches, as the buffers can be
12384387e81cSIdo Segev 	 * used on other cores as well.
12394387e81cSIdo Segev 	 */
12404387e81cSIdo Segev 	rte_mempool_cache_flush(NULL, ring->mb_pool);
12411173fca2SJan Medala 
12421173fca2SJan Medala 	return 0;
12431173fca2SJan Medala }
12441173fca2SJan Medala 
12451173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev,
12461173fca2SJan Medala 			      uint16_t queue_idx,
12471173fca2SJan Medala 			      uint16_t nb_desc,
12484217cb0bSMichal Krawczyk 			      unsigned int socket_id,
124956b8b9b7SRafal Kozik 			      const struct rte_eth_txconf *tx_conf)
12501173fca2SJan Medala {
12511173fca2SJan Medala 	struct ena_ring *txq = NULL;
1252890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
12531173fca2SJan Medala 	unsigned int i;
12541173fca2SJan Medala 
12551173fca2SJan Medala 	txq = &adapter->tx_ring[queue_idx];
12561173fca2SJan Medala 
12571173fca2SJan Medala 	if (txq->configured) {
12586f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
12591173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
12601173fca2SJan Medala 			queue_idx);
1261241da076SRafal Kozik 		return ENA_COM_FAULT;
12621173fca2SJan Medala 	}
12631173fca2SJan Medala 
12641daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
12656f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1266498c687aSRafal Kozik 			"Unsupported size of TX queue: %d is not a power of 2.\n",
12671daff526SJakub Palider 			nb_desc);
12681daff526SJakub Palider 		return -EINVAL;
12691daff526SJakub Palider 	}
12701daff526SJakub Palider 
12715920d930SMichal Krawczyk 	if (nb_desc > adapter->max_tx_ring_size) {
12726f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
12731173fca2SJan Medala 			"Unsupported size of TX queue (max size: %d)\n",
12745920d930SMichal Krawczyk 			adapter->max_tx_ring_size);
12751173fca2SJan Medala 		return -EINVAL;
12761173fca2SJan Medala 	}
12771173fca2SJan Medala 
1278ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
12795920d930SMichal Krawczyk 		nb_desc = adapter->max_tx_ring_size;
1280ea93d37eSRafal Kozik 
12811173fca2SJan Medala 	txq->port_id = dev->data->port_id;
12821173fca2SJan Medala 	txq->next_to_clean = 0;
12831173fca2SJan Medala 	txq->next_to_use = 0;
12841173fca2SJan Medala 	txq->ring_size = nb_desc;
1285c0006061SMichal Krawczyk 	txq->size_mask = nb_desc - 1;
12864217cb0bSMichal Krawczyk 	txq->numa_socket_id = socket_id;
12871d973d8fSIgor Chauskin 	txq->pkts_without_db = false;
12881173fca2SJan Medala 
12891173fca2SJan Medala 	txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
12901173fca2SJan Medala 					  sizeof(struct ena_tx_buffer) *
12911173fca2SJan Medala 					  txq->ring_size,
12921173fca2SJan Medala 					  RTE_CACHE_LINE_SIZE);
12931173fca2SJan Medala 	if (!txq->tx_buffer_info) {
12946f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n");
1295df238f84SMichal Krawczyk 		return -ENOMEM;
12961173fca2SJan Medala 	}
12971173fca2SJan Medala 
12981173fca2SJan Medala 	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
12991173fca2SJan Medala 					 sizeof(u16) * txq->ring_size,
13001173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
13011173fca2SJan Medala 	if (!txq->empty_tx_reqs) {
13026f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n");
1303df238f84SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
1304df238f84SMichal Krawczyk 		return -ENOMEM;
13051173fca2SJan Medala 	}
1306241da076SRafal Kozik 
13072fca2a98SMichal Krawczyk 	txq->push_buf_intermediate_buf =
13082fca2a98SMichal Krawczyk 		rte_zmalloc("txq->push_buf_intermediate_buf",
13092fca2a98SMichal Krawczyk 			    txq->tx_max_header_size,
13102fca2a98SMichal Krawczyk 			    RTE_CACHE_LINE_SIZE);
13112fca2a98SMichal Krawczyk 	if (!txq->push_buf_intermediate_buf) {
13126f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n");
13132fca2a98SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
13142fca2a98SMichal Krawczyk 		rte_free(txq->empty_tx_reqs);
13152fca2a98SMichal Krawczyk 		return -ENOMEM;
13162fca2a98SMichal Krawczyk 	}
13172fca2a98SMichal Krawczyk 
13181173fca2SJan Medala 	for (i = 0; i < txq->ring_size; i++)
13191173fca2SJan Medala 		txq->empty_tx_reqs[i] = i;
13201173fca2SJan Medala 
13212081d5e2SMichal Krawczyk 	if (tx_conf != NULL) {
13222081d5e2SMichal Krawczyk 		txq->offloads =
13232081d5e2SMichal Krawczyk 			tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
13242081d5e2SMichal Krawczyk 	}
13251173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
13261173fca2SJan Medala 	txq->configured = 1;
13271173fca2SJan Medala 	dev->data->tx_queues[queue_idx] = txq;
1328241da076SRafal Kozik 
1329241da076SRafal Kozik 	return 0;
13301173fca2SJan Medala }
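
/*
 * A hypothetical application-side sketch: the checks above require a
 * power-of-2 descriptor count no larger than max_tx_ring_size, so callers
 * typically round their request accordingly. Port/queue ids and the
 * descriptor count are assumptions; NULL tx_conf selects the defaults.
 */
static __rte_unused int
ena_example_setup_txq(uint16_t port_id, uint16_t queue_id)
{
	uint16_t nb_desc = 1024; /* must satisfy rte_is_power_of_2() */

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				      rte_socket_id(), NULL);
}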
13311173fca2SJan Medala 
13321173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev,
13331173fca2SJan Medala 			      uint16_t queue_idx,
13341173fca2SJan Medala 			      uint16_t nb_desc,
13354217cb0bSMichal Krawczyk 			      unsigned int socket_id,
1336a4996bd8SWei Dai 			      __rte_unused const struct rte_eth_rxconf *rx_conf,
13371173fca2SJan Medala 			      struct rte_mempool *mp)
13381173fca2SJan Medala {
1339890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
13401173fca2SJan Medala 	struct ena_ring *rxq = NULL;
134138364c26SMichal Krawczyk 	size_t buffer_size;
1342df238f84SMichal Krawczyk 	int i;
13431173fca2SJan Medala 
13441173fca2SJan Medala 	rxq = &adapter->rx_ring[queue_idx];
13451173fca2SJan Medala 	if (rxq->configured) {
13466f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
13471173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
13481173fca2SJan Medala 			queue_idx);
1349241da076SRafal Kozik 		return ENA_COM_FAULT;
13501173fca2SJan Medala 	}
13511173fca2SJan Medala 
1352ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
13535920d930SMichal Krawczyk 		nb_desc = adapter->max_rx_ring_size;
1354ea93d37eSRafal Kozik 
13551daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
13566f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1357498c687aSRafal Kozik 			"Unsupported size of RX queue: %d is not a power of 2.\n",
13581daff526SJakub Palider 			nb_desc);
13591daff526SJakub Palider 		return -EINVAL;
13601daff526SJakub Palider 	}
13611daff526SJakub Palider 
13625920d930SMichal Krawczyk 	if (nb_desc > adapter->max_rx_ring_size) {
13636f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
13641173fca2SJan Medala 			"Unsupported size of RX queue (max size: %d)\n",
13655920d930SMichal Krawczyk 			adapter->max_rx_ring_size);
13661173fca2SJan Medala 		return -EINVAL;
13671173fca2SJan Medala 	}
13681173fca2SJan Medala 
136938364c26SMichal Krawczyk 	/* ENA doesn't support buffers smaller than 1400 bytes */
137038364c26SMichal Krawczyk 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
137138364c26SMichal Krawczyk 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
137238364c26SMichal Krawczyk 		PMD_DRV_LOG(ERR,
137338364c26SMichal Krawczyk 			"Unsupported size of RX buffer: %zu (min size: %d)\n",
137438364c26SMichal Krawczyk 			buffer_size, ENA_RX_BUF_MIN_SIZE);
137538364c26SMichal Krawczyk 		return -EINVAL;
137638364c26SMichal Krawczyk 	}
137738364c26SMichal Krawczyk 
13781173fca2SJan Medala 	rxq->port_id = dev->data->port_id;
13791173fca2SJan Medala 	rxq->next_to_clean = 0;
13801173fca2SJan Medala 	rxq->next_to_use = 0;
13811173fca2SJan Medala 	rxq->ring_size = nb_desc;
1382c0006061SMichal Krawczyk 	rxq->size_mask = nb_desc - 1;
13834217cb0bSMichal Krawczyk 	rxq->numa_socket_id = socket_id;
13841173fca2SJan Medala 	rxq->mb_pool = mp;
13851173fca2SJan Medala 
13861173fca2SJan Medala 	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
13871be097dcSMichal Krawczyk 		sizeof(struct ena_rx_buffer) * nb_desc,
13881173fca2SJan Medala 		RTE_CACHE_LINE_SIZE);
13891173fca2SJan Medala 	if (!rxq->rx_buffer_info) {
13906f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
13911173fca2SJan Medala 		return -ENOMEM;
13921173fca2SJan Medala 	}
13931173fca2SJan Medala 
139479405ee1SRafal Kozik 	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
139579405ee1SRafal Kozik 					    sizeof(struct rte_mbuf *) * nb_desc,
139679405ee1SRafal Kozik 					    RTE_CACHE_LINE_SIZE);
139779405ee1SRafal Kozik 
139879405ee1SRafal Kozik 	if (!rxq->rx_refill_buffer) {
13996f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n");
140079405ee1SRafal Kozik 		rte_free(rxq->rx_buffer_info);
140179405ee1SRafal Kozik 		rxq->rx_buffer_info = NULL;
140279405ee1SRafal Kozik 		return -ENOMEM;
140379405ee1SRafal Kozik 	}
140479405ee1SRafal Kozik 
1405c2034976SMichal Krawczyk 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
1406c2034976SMichal Krawczyk 					 sizeof(uint16_t) * nb_desc,
1407c2034976SMichal Krawczyk 					 RTE_CACHE_LINE_SIZE);
1408c2034976SMichal Krawczyk 	if (!rxq->empty_rx_reqs) {
14096f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n");
1410c2034976SMichal Krawczyk 		rte_free(rxq->rx_buffer_info);
1411c2034976SMichal Krawczyk 		rxq->rx_buffer_info = NULL;
141279405ee1SRafal Kozik 		rte_free(rxq->rx_refill_buffer);
141379405ee1SRafal Kozik 		rxq->rx_refill_buffer = NULL;
1414c2034976SMichal Krawczyk 		return -ENOMEM;
1415c2034976SMichal Krawczyk 	}
1416c2034976SMichal Krawczyk 
1417c2034976SMichal Krawczyk 	for (i = 0; i < nb_desc; i++)
1418eccbe2ffSRafal Kozik 		rxq->empty_rx_reqs[i] = i;
1419c2034976SMichal Krawczyk 
14201173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
14211173fca2SJan Medala 	rxq->configured = 1;
14221173fca2SJan Medala 	dev->data->rx_queues[queue_idx] = rxq;
14231173fca2SJan Medala 
1424df238f84SMichal Krawczyk 	return 0;
14251173fca2SJan Medala }
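
/*
 * A hypothetical sketch of creating a mempool that passes the buffer-size
 * check above: the data room must leave at least ENA_RX_BUF_MIN_SIZE bytes
 * after RTE_PKTMBUF_HEADROOM. Pool name and element counts are assumptions.
 */
static __rte_unused struct rte_mempool *
ena_example_create_rx_pool(int socket_id)
{
	/* RTE_MBUF_DEFAULT_BUF_SIZE (2048B room + headroom) is large enough. */
	return rte_pktmbuf_pool_create("rx_pool_example", 4096, 256, 0,
				       RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
}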
14261173fca2SJan Medala 
142783fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
142883fd97b2SMichal Krawczyk 				  struct rte_mbuf *mbuf, uint16_t id)
142983fd97b2SMichal Krawczyk {
143083fd97b2SMichal Krawczyk 	struct ena_com_buf ebuf;
143183fd97b2SMichal Krawczyk 	int rc;
143283fd97b2SMichal Krawczyk 
143383fd97b2SMichal Krawczyk 	/* prepare physical address for DMA transaction */
143483fd97b2SMichal Krawczyk 	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
143583fd97b2SMichal Krawczyk 	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
143683fd97b2SMichal Krawczyk 
143783fd97b2SMichal Krawczyk 	/* pass resource to device */
143883fd97b2SMichal Krawczyk 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
143983fd97b2SMichal Krawczyk 	if (unlikely(rc != 0))
144083fd97b2SMichal Krawczyk 		PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
144183fd97b2SMichal Krawczyk 
144283fd97b2SMichal Krawczyk 	return rc;
144383fd97b2SMichal Krawczyk }
144483fd97b2SMichal Krawczyk 
14451173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
14461173fca2SJan Medala {
14471173fca2SJan Medala 	unsigned int i;
14481173fca2SJan Medala 	int rc;
14491daff526SJakub Palider 	uint16_t next_to_use = rxq->next_to_use;
1450c2034976SMichal Krawczyk 	uint16_t in_use, req_id;
145179405ee1SRafal Kozik 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
14521173fca2SJan Medala 
14531173fca2SJan Medala 	if (unlikely(!count))
14541173fca2SJan Medala 		return 0;
14551173fca2SJan Medala 
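	/* Descriptors currently held by the device: usable ring capacity
	 * (ring_size - 1) minus the free submission queue entries.
	 */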
1456c0006061SMichal Krawczyk 	in_use = rxq->ring_size - 1 -
1457c0006061SMichal Krawczyk 		ena_com_free_q_entries(rxq->ena_com_io_sq);
1458c0006061SMichal Krawczyk 	ena_assert_msg(((in_use + count) < rxq->ring_size),
1459c0006061SMichal Krawczyk 		"bad ring state\n");
14601173fca2SJan Medala 
14611173fca2SJan Medala 	/* get resources for incoming packets */
14623c8bc29fSDavid Harton 	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
14631173fca2SJan Medala 	if (unlikely(rc < 0)) {
14641173fca2SJan Medala 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
14657830e905SSolganik Alexander 		++rxq->rx_stats.mbuf_alloc_fail;
14661173fca2SJan Medala 		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
14671173fca2SJan Medala 		return 0;
14681173fca2SJan Medala 	}
14691173fca2SJan Medala 
14701173fca2SJan Medala 	for (i = 0; i < count; i++) {
147179405ee1SRafal Kozik 		struct rte_mbuf *mbuf = mbufs[i];
14721be097dcSMichal Krawczyk 		struct ena_rx_buffer *rx_info;
14731173fca2SJan Medala 
147479405ee1SRafal Kozik 		if (likely((i + 4) < count))
147579405ee1SRafal Kozik 			rte_prefetch0(mbufs[i + 4]);
1476c2034976SMichal Krawczyk 
1477c0006061SMichal Krawczyk 		req_id = rxq->empty_rx_reqs[next_to_use];
14781be097dcSMichal Krawczyk 		rx_info = &rxq->rx_buffer_info[req_id];
1479241da076SRafal Kozik 
148083fd97b2SMichal Krawczyk 		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
148183fd97b2SMichal Krawczyk 		if (unlikely(rc != 0))
14821173fca2SJan Medala 			break;
148383fd97b2SMichal Krawczyk 
14841be097dcSMichal Krawczyk 		rx_info->mbuf = mbuf;
1485c0006061SMichal Krawczyk 		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
14861173fca2SJan Medala 	}
14871173fca2SJan Medala 
148879405ee1SRafal Kozik 	if (unlikely(i < count)) {
14896f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Refilled RX qid %d with only %d "
1490241da076SRafal Kozik 			"buffers (out of %d requested)\n", rxq->id, i, count);
14913c8bc29fSDavid Harton 		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
14927830e905SSolganik Alexander 		++rxq->rx_stats.refill_partial;
149379405ee1SRafal Kozik 	}
1494241da076SRafal Kozik 
14955e02e19eSJan Medala 	/* When we have submitted free resources to the device... */
14963d19e1abSRafal Kozik 	if (likely(i > 0)) {
149738faa87eSMichal Krawczyk 		/* ...let HW know that it can fill buffers with data. */
14981173fca2SJan Medala 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
14991173fca2SJan Medala 
15005e02e19eSJan Medala 		rxq->next_to_use = next_to_use;
15015e02e19eSJan Medala 	}
15025e02e19eSJan Medala 
15031173fca2SJan Medala 	return i;
15041173fca2SJan Medala }
15051173fca2SJan Medala 
15061173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev,
1507e859d2b8SRafal Kozik 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
1508e859d2b8SRafal Kozik 			   bool *wd_state)
15091173fca2SJan Medala {
1510ca148440SMichal Krawczyk 	uint32_t aenq_groups;
15111173fca2SJan Medala 	int rc;
1512c4144557SJan Medala 	bool readless_supported;
15131173fca2SJan Medala 
15141173fca2SJan Medala 	/* Initialize mmio registers */
15151173fca2SJan Medala 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
15161173fca2SJan Medala 	if (rc) {
15176f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to init mmio read less\n");
15181173fca2SJan Medala 		return rc;
15191173fca2SJan Medala 	}
15201173fca2SJan Medala 
1521c4144557SJan Medala 	/* The PCIe configuration space revision ID indicates if mmio reg
1522c4144557SJan Medala 	 * read is disabled.
1523c4144557SJan Medala 	 */
1524c4144557SJan Medala 	readless_supported =
1525c4144557SJan Medala 		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
1526c4144557SJan Medala 			       & ENA_MMIO_DISABLE_REG_READ);
1527c4144557SJan Medala 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1528c4144557SJan Medala 
15291173fca2SJan Medala 	/* reset device */
15303adcba9aSMichal Krawczyk 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
15311173fca2SJan Medala 	if (rc) {
15326f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "cannot reset device\n");
15331173fca2SJan Medala 		goto err_mmio_read_less;
15341173fca2SJan Medala 	}
15351173fca2SJan Medala 
15361173fca2SJan Medala 	/* check FW version */
15371173fca2SJan Medala 	rc = ena_com_validate_version(ena_dev);
15381173fca2SJan Medala 	if (rc) {
15396f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "device version is too low\n");
15401173fca2SJan Medala 		goto err_mmio_read_less;
15411173fca2SJan Medala 	}
15421173fca2SJan Medala 
15431173fca2SJan Medala 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
15441173fca2SJan Medala 
15451173fca2SJan Medala 	/* ENA device administration layer init */
1546b68309beSRafal Kozik 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
15471173fca2SJan Medala 	if (rc) {
15486f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15491173fca2SJan Medala 			"cannot initialize ena admin queue with device\n");
15501173fca2SJan Medala 		goto err_mmio_read_less;
15511173fca2SJan Medala 	}
15521173fca2SJan Medala 
15531173fca2SJan Medala 	/* To enable the MSI-X interrupts, the driver needs to know the number
15541173fca2SJan Medala 	 * of queues, so it uses polling mode to retrieve this
15551173fca2SJan Medala 	 * information.
15561173fca2SJan Medala 	 */
15571173fca2SJan Medala 	ena_com_set_admin_polling_mode(ena_dev, true);
15581173fca2SJan Medala 
1559201ff2e5SJakub Palider 	ena_config_host_info(ena_dev);
1560201ff2e5SJakub Palider 
15611173fca2SJan Medala 	/* Get Device Attributes and features */
15621173fca2SJan Medala 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
15631173fca2SJan Medala 	if (rc) {
15646f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15651173fca2SJan Medala 			"cannot get attributes for ena device, rc: %d\n", rc);
15661173fca2SJan Medala 		goto err_admin_init;
15671173fca2SJan Medala 	}
15681173fca2SJan Medala 
1569f01f060cSRafal Kozik 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1570d9b8b106SMichal Krawczyk 		      BIT(ENA_ADMIN_NOTIFICATION) |
1571983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1572983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1573983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_WARNING);
1574ca148440SMichal Krawczyk 
1575ca148440SMichal Krawczyk 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1576ca148440SMichal Krawczyk 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
1577ca148440SMichal Krawczyk 	if (rc) {
15786f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc);
1579ca148440SMichal Krawczyk 		goto err_admin_init;
1580ca148440SMichal Krawczyk 	}
1581ca148440SMichal Krawczyk 
1582e859d2b8SRafal Kozik 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
1583e859d2b8SRafal Kozik 
15841173fca2SJan Medala 	return 0;
15851173fca2SJan Medala 
15861173fca2SJan Medala err_admin_init:
15871173fca2SJan Medala 	ena_com_admin_destroy(ena_dev);
15881173fca2SJan Medala 
15891173fca2SJan Medala err_mmio_read_less:
15901173fca2SJan Medala 	ena_com_mmio_reg_read_request_destroy(ena_dev);
15911173fca2SJan Medala 
15921173fca2SJan Medala 	return rc;
15931173fca2SJan Medala }
15941173fca2SJan Medala 
1595ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg)
159615773e06SMichal Krawczyk {
1597890728ffSStephen Hemminger 	struct ena_adapter *adapter = cb_arg;
159815773e06SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
159915773e06SMichal Krawczyk 
160015773e06SMichal Krawczyk 	ena_com_admin_q_comp_intr_handler(ena_dev);
16013d19e1abSRafal Kozik 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1602ca148440SMichal Krawczyk 		ena_com_aenq_intr_handler(ena_dev, adapter);
160315773e06SMichal Krawczyk }
160415773e06SMichal Krawczyk 
16055efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter)
16065efb9fc7SMichal Krawczyk {
1607e859d2b8SRafal Kozik 	if (!adapter->wd_state)
1608e859d2b8SRafal Kozik 		return;
1609e859d2b8SRafal Kozik 
16105efb9fc7SMichal Krawczyk 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
16115efb9fc7SMichal Krawczyk 		return;
16125efb9fc7SMichal Krawczyk 
16135efb9fc7SMichal Krawczyk 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
16145efb9fc7SMichal Krawczyk 	    adapter->keep_alive_timeout)) {
16156f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
16165efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
16175efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16187830e905SSolganik Alexander 		++adapter->dev_stats.wd_expired;
16195efb9fc7SMichal Krawczyk 	}
16205efb9fc7SMichal Krawczyk }
16215efb9fc7SMichal Krawczyk 
16225efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */
16235efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter)
16245efb9fc7SMichal Krawczyk {
16255efb9fc7SMichal Krawczyk 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
16266f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n");
16275efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
16285efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16295efb9fc7SMichal Krawczyk 	}
16305efb9fc7SMichal Krawczyk }
16315efb9fc7SMichal Krawczyk 
1632d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1633d9b8b106SMichal Krawczyk 				  void *arg)
1634d9b8b106SMichal Krawczyk {
1635890728ffSStephen Hemminger 	struct ena_adapter *adapter = arg;
1636d9b8b106SMichal Krawczyk 	struct rte_eth_dev *dev = adapter->rte_dev;
1637d9b8b106SMichal Krawczyk 
16385efb9fc7SMichal Krawczyk 	check_for_missing_keep_alive(adapter);
16395efb9fc7SMichal Krawczyk 	check_for_admin_com_state(adapter);
1640d9b8b106SMichal Krawczyk 
16415efb9fc7SMichal Krawczyk 	if (unlikely(adapter->trigger_reset)) {
16426f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
16435723fbedSFerruh Yigit 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1644d9b8b106SMichal Krawczyk 			NULL);
1645d9b8b106SMichal Krawczyk 	}
1646d9b8b106SMichal Krawczyk }
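
/*
 * When the watchdog requests a reset, the application receives
 * RTE_ETH_EVENT_INTR_RESET and has to recover the port itself. A minimal,
 * hypothetical handler, registered with rte_eth_dev_callback_register(port_id,
 * RTE_ETH_EVENT_INTR_RESET, ena_example_reset_event_cb, NULL); real
 * applications usually defer the reset to a dedicated context:
 */
static __rte_unused int
ena_example_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
			   void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	/* Stops the port and re-runs the dev uninit/init routines. */
	return rte_eth_dev_reset(port_id);
}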
1647d9b8b106SMichal Krawczyk 
16482fca2a98SMichal Krawczyk static inline void
16498a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config,
16508a7a73f2SMichal Krawczyk 			       struct ena_admin_feature_llq_desc *llq,
16518a7a73f2SMichal Krawczyk 			       bool use_large_llq_hdr)
16522fca2a98SMichal Krawczyk {
16532fca2a98SMichal Krawczyk 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
16542fca2a98SMichal Krawczyk 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
16552fca2a98SMichal Krawczyk 	llq_config->llq_num_decs_before_header =
16562fca2a98SMichal Krawczyk 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
16578a7a73f2SMichal Krawczyk 
16588a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr &&
16598a7a73f2SMichal Krawczyk 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
16608a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16618a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
16628a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 256;
16638a7a73f2SMichal Krawczyk 	} else {
16648a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16658a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
16662fca2a98SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 128;
16672fca2a98SMichal Krawczyk 	}
16688a7a73f2SMichal Krawczyk }
16692fca2a98SMichal Krawczyk 
16702fca2a98SMichal Krawczyk static int
16712fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter,
16722fca2a98SMichal Krawczyk 				struct ena_com_dev *ena_dev,
16732fca2a98SMichal Krawczyk 				struct ena_admin_feature_llq_desc *llq,
16742fca2a98SMichal Krawczyk 				struct ena_llq_configurations *llq_default_configurations)
16752fca2a98SMichal Krawczyk {
16762fca2a98SMichal Krawczyk 	int rc;
16772fca2a98SMichal Krawczyk 	u32 llq_feature_mask;
16782fca2a98SMichal Krawczyk 
16792fca2a98SMichal Krawczyk 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
16802fca2a98SMichal Krawczyk 	if (!(ena_dev->supported_features & llq_feature_mask)) {
16816f1c9df9SStephen Hemminger 		PMD_DRV_LOG(INFO,
16822fca2a98SMichal Krawczyk 			"LLQ is not supported. Fallback to host mode policy.\n");
16832fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16842fca2a98SMichal Krawczyk 		return 0;
16852fca2a98SMichal Krawczyk 	}
16862fca2a98SMichal Krawczyk 
16872fca2a98SMichal Krawczyk 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
16882fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
16892fca2a98SMichal Krawczyk 		PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
1690498c687aSRafal Kozik 			"Fallback to host mode policy.");
16912fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16922fca2a98SMichal Krawczyk 		return 0;
16932fca2a98SMichal Krawczyk 	}
16942fca2a98SMichal Krawczyk 
16952fca2a98SMichal Krawczyk 	/* Nothing to config, exit */
16962fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
16972fca2a98SMichal Krawczyk 		return 0;
16982fca2a98SMichal Krawczyk 
16992fca2a98SMichal Krawczyk 	if (!adapter->dev_mem_base) {
17006f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Unable to access LLQ bar resource. "
17012fca2a98SMichal Krawczyk 			"Fallback to host mode policy.\n");
17022fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
17032fca2a98SMichal Krawczyk 		return 0;
17042fca2a98SMichal Krawczyk 	}
17052fca2a98SMichal Krawczyk 
17062fca2a98SMichal Krawczyk 	ena_dev->mem_bar = adapter->dev_mem_base;
17072fca2a98SMichal Krawczyk 
17082fca2a98SMichal Krawczyk 	return 0;
17092fca2a98SMichal Krawczyk }
17102fca2a98SMichal Krawczyk 
17115920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
171201bd6877SRafal Kozik 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
171301bd6877SRafal Kozik {
17145920d930SMichal Krawczyk 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
171501bd6877SRafal Kozik 
1716ea93d37eSRafal Kozik 	/* Regular queues capabilities */
1717ea93d37eSRafal Kozik 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1718ea93d37eSRafal Kozik 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1719ea93d37eSRafal Kozik 			&get_feat_ctx->max_queue_ext.max_queue_ext;
17202fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
17212fca2a98SMichal Krawczyk 				    max_queue_ext->max_rx_cq_num);
17222fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
17232fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
1724ea93d37eSRafal Kozik 	} else {
1725ea93d37eSRafal Kozik 		struct ena_admin_queue_feature_desc *max_queues =
1726ea93d37eSRafal Kozik 			&get_feat_ctx->max_queues;
17272fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queues->max_sq_num;
17282fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queues->max_cq_num;
17292fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
1730ea93d37eSRafal Kozik 	}
173101bd6877SRafal Kozik 
17322fca2a98SMichal Krawczyk 	/* In case of LLQ use the llq number in the get feature cmd */
17332fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
17342fca2a98SMichal Krawczyk 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
17352fca2a98SMichal Krawczyk 
17365920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
17375920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
17385920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
173901bd6877SRafal Kozik 
17405920d930SMichal Krawczyk 	if (unlikely(max_num_io_queues == 0)) {
17416f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
174201bd6877SRafal Kozik 		return -EFAULT;
174301bd6877SRafal Kozik 	}
174401bd6877SRafal Kozik 
17455920d930SMichal Krawczyk 	return max_num_io_queues;
174601bd6877SRafal Kozik }
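
/*
 * A worked example of the clamping above with illustrative numbers: for
 * max_rx_sq_num = 128, max_rx_cq_num = 128, max_tx_sq_num = 64 and
 * max_tx_cq_num = 128, io_rx_num = min(128, 128) = 128, and the result is
 * min(ENA_MAX_NUM_IO_QUEUES, 128, 64, 128) = 64, assuming
 * ENA_MAX_NUM_IO_QUEUES >= 64.
 */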
174701bd6877SRafal Kozik 
17481173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
17491173fca2SJan Medala {
1750ea93d37eSRafal Kozik 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
17511173fca2SJan Medala 	struct rte_pci_device *pci_dev;
1752eb0ef49dSMichal Krawczyk 	struct rte_intr_handle *intr_handle;
1753890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
17541173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
17551173fca2SJan Medala 	struct ena_com_dev_get_features_ctx get_feat_ctx;
17562fca2a98SMichal Krawczyk 	struct ena_llq_configurations llq_config;
17572fca2a98SMichal Krawczyk 	const char *queue_type_str;
17585920d930SMichal Krawczyk 	uint32_t max_num_io_queues;
1759ea93d37eSRafal Kozik 	int rc;
17601173fca2SJan Medala 	static int adapters_found;
176133dde075SMichal Krawczyk 	bool disable_meta_caching;
17625f267cb0SFerruh Yigit 	bool wd_state = false;
17631173fca2SJan Medala 
17641173fca2SJan Medala 	eth_dev->dev_ops = &ena_dev_ops;
17651173fca2SJan Medala 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
17661173fca2SJan Medala 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
1767b3fc5a1aSKonstantin Ananyev 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
17681173fca2SJan Medala 
17691173fca2SJan Medala 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
17701173fca2SJan Medala 		return 0;
17711173fca2SJan Medala 
1772f30e69b4SFerruh Yigit 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1773f30e69b4SFerruh Yigit 
1774fd976890SMichal Krawczyk 	memset(adapter, 0, sizeof(struct ena_adapter));
1775fd976890SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1776fd976890SMichal Krawczyk 
1777fd976890SMichal Krawczyk 	adapter->rte_eth_dev_data = eth_dev->data;
1778fd976890SMichal Krawczyk 	adapter->rte_dev = eth_dev;
1779fd976890SMichal Krawczyk 
1780c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
17811173fca2SJan Medala 	adapter->pdev = pci_dev;
17821173fca2SJan Medala 
1783f2462150SFerruh Yigit 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
17841173fca2SJan Medala 		     pci_dev->addr.domain,
17851173fca2SJan Medala 		     pci_dev->addr.bus,
17861173fca2SJan Medala 		     pci_dev->addr.devid,
17871173fca2SJan Medala 		     pci_dev->addr.function);
17881173fca2SJan Medala 
1789eb0ef49dSMichal Krawczyk 	intr_handle = &pci_dev->intr_handle;
1790eb0ef49dSMichal Krawczyk 
17911173fca2SJan Medala 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
17921173fca2SJan Medala 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
17931173fca2SJan Medala 
17941d339597SRafal Kozik 	if (!adapter->regs) {
1795f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
17961173fca2SJan Medala 			     ENA_REGS_BAR);
17971d339597SRafal Kozik 		return -ENXIO;
17981d339597SRafal Kozik 	}
17991173fca2SJan Medala 
18001173fca2SJan Medala 	ena_dev->reg_bar = adapter->regs;
18011173fca2SJan Medala 	ena_dev->dmadev = adapter->pdev;
18021173fca2SJan Medala 
18031173fca2SJan Medala 	adapter->id_number = adapters_found;
18041173fca2SJan Medala 
18051173fca2SJan Medala 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
18061173fca2SJan Medala 		 adapter->id_number);
18071173fca2SJan Medala 
18088a7a73f2SMichal Krawczyk 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
18098a7a73f2SMichal Krawczyk 	if (rc != 0) {
18108a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
18118a7a73f2SMichal Krawczyk 		goto err;
18128a7a73f2SMichal Krawczyk 	}
18138a7a73f2SMichal Krawczyk 
18141173fca2SJan Medala 	/* device specific initialization routine */
1815e859d2b8SRafal Kozik 	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
18161173fca2SJan Medala 	if (rc) {
1817f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
1818241da076SRafal Kozik 		goto err;
18191173fca2SJan Medala 	}
1820e859d2b8SRafal Kozik 	adapter->wd_state = wd_state;
18211173fca2SJan Medala 
18228a7a73f2SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
18238a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18242fca2a98SMichal Krawczyk 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
18252fca2a98SMichal Krawczyk 					     &get_feat_ctx.llq, &llq_config);
18262fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
18272fca2a98SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to set placement policy");
18282fca2a98SMichal Krawczyk 		goto err_device_destroy;
18292fca2a98SMichal Krawczyk 	}
18302fca2a98SMichal Krawczyk 
18312fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
18322fca2a98SMichal Krawczyk 		queue_type_str = "Regular";
18332fca2a98SMichal Krawczyk 	else
18342fca2a98SMichal Krawczyk 		queue_type_str = "Low latency";
18356f1c9df9SStephen Hemminger 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
1836ea93d37eSRafal Kozik 
1837ea93d37eSRafal Kozik 	calc_queue_ctx.ena_dev = ena_dev;
1838ea93d37eSRafal Kozik 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
18391173fca2SJan Medala 
18405920d930SMichal Krawczyk 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
18418a7a73f2SMichal Krawczyk 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
18428a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18435920d930SMichal Krawczyk 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
1844241da076SRafal Kozik 		rc = -EFAULT;
1845241da076SRafal Kozik 		goto err_device_destroy;
1846241da076SRafal Kozik 	}
18471173fca2SJan Medala 
18485920d930SMichal Krawczyk 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
18495920d930SMichal Krawczyk 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
1850ea93d37eSRafal Kozik 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
1851ea93d37eSRafal Kozik 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
18525920d930SMichal Krawczyk 	adapter->max_num_io_queues = max_num_io_queues;
18532061fe41SRafal Kozik 
185433dde075SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
185533dde075SMichal Krawczyk 		disable_meta_caching =
185633dde075SMichal Krawczyk 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
185733dde075SMichal Krawczyk 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
185833dde075SMichal Krawczyk 	} else {
185933dde075SMichal Krawczyk 		disable_meta_caching = false;
186033dde075SMichal Krawczyk 	}
186133dde075SMichal Krawczyk 
18621173fca2SJan Medala 	/* prepare ring structures */
186333dde075SMichal Krawczyk 	ena_init_rings(adapter, disable_meta_caching);
18641173fca2SJan Medala 
1865372c1af5SJan Medala 	ena_config_debug_area(adapter);
1866372c1af5SJan Medala 
18671173fca2SJan Medala 	/* Set max MTU for this device */
18681173fca2SJan Medala 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
18691173fca2SJan Medala 
1870117ba4a6SMichal Krawczyk 	/* set device support for offloads */
1871117ba4a6SMichal Krawczyk 	adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
1872117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
1873117ba4a6SMichal Krawczyk 	adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
1874117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
1875ef538c1aSMichal Krawczyk 	adapter->offloads.rx_csum_supported =
1876117ba4a6SMichal Krawczyk 		(get_feat_ctx.offload.rx_supported &
1877117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
187883277a7cSJakub Palider 
18791173fca2SJan Medala 	/* Copy MAC address and point DPDK to it */
18806d13ea8eSOlivier Matz 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
1881538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)
1882538da7a1SOlivier Matz 			get_feat_ctx.dev_attr.mac_addr,
18836d13ea8eSOlivier Matz 			(struct rte_ether_addr *)adapter->mac_addr);
18841173fca2SJan Medala 
18851173fca2SJan Medala 	adapter->drv_stats = rte_zmalloc("adapter stats",
18861173fca2SJan Medala 					 sizeof(*adapter->drv_stats),
18871173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
18881173fca2SJan Medala 	if (!adapter->drv_stats) {
18896f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n");
1890241da076SRafal Kozik 		rc = -ENOMEM;
1891241da076SRafal Kozik 		goto err_delete_debug_area;
18921173fca2SJan Medala 	}
18931173fca2SJan Medala 
18941343c415SMichal Krawczyk 	rte_spinlock_init(&adapter->admin_lock);
18951343c415SMichal Krawczyk 
1896eb0ef49dSMichal Krawczyk 	rte_intr_callback_register(intr_handle,
1897eb0ef49dSMichal Krawczyk 				   ena_interrupt_handler_rte,
1898eb0ef49dSMichal Krawczyk 				   adapter);
1899eb0ef49dSMichal Krawczyk 	rte_intr_enable(intr_handle);
1900eb0ef49dSMichal Krawczyk 	ena_com_set_admin_polling_mode(ena_dev, false);
1901ca148440SMichal Krawczyk 	ena_com_admin_aenq_enable(ena_dev);
1902eb0ef49dSMichal Krawczyk 
1903d9b8b106SMichal Krawczyk 	if (adapters_found == 0)
1904d9b8b106SMichal Krawczyk 		rte_timer_subsystem_init();
1905d9b8b106SMichal Krawczyk 	rte_timer_init(&adapter->timer_wd);
1906d9b8b106SMichal Krawczyk 
19071173fca2SJan Medala 	adapters_found++;
19081173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_INIT;
19091173fca2SJan Medala 
19101173fca2SJan Medala 	return 0;
1911241da076SRafal Kozik 
1912241da076SRafal Kozik err_delete_debug_area:
1913241da076SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1914241da076SRafal Kozik 
1915241da076SRafal Kozik err_device_destroy:
1916241da076SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1917241da076SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1918241da076SRafal Kozik 
1919241da076SRafal Kozik err:
1920241da076SRafal Kozik 	return rc;
19211173fca2SJan Medala }
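
/*
 * The devargs parsed in eth_ena_dev_init() are supplied on the EAL command
 * line together with the PCI address. A hypothetical invocation (the
 * "large_llq_hdr" key is assumed to be what ena_parse_devargs() accepts for
 * adapter->use_large_llq_hdr):
 *
 *     dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i
 */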
19221173fca2SJan Medala 
1923e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev)
1924eb0ef49dSMichal Krawczyk {
1925890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
1926e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1927eb0ef49dSMichal Krawczyk 
1928e457bc70SRafal Kozik 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
1929e457bc70SRafal Kozik 		return;
1930e457bc70SRafal Kozik 
1931e457bc70SRafal Kozik 	ena_com_set_admin_running_state(ena_dev, false);
1932eb0ef49dSMichal Krawczyk 
1933eb0ef49dSMichal Krawczyk 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
1934eb0ef49dSMichal Krawczyk 		ena_close(eth_dev);
1935eb0ef49dSMichal Krawczyk 
1936e457bc70SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1937e457bc70SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1938e457bc70SRafal Kozik 
1939e457bc70SRafal Kozik 	ena_com_abort_admin_commands(ena_dev);
1940e457bc70SRafal Kozik 	ena_com_wait_for_abort_completion(ena_dev);
1941e457bc70SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1942e457bc70SRafal Kozik 	ena_com_mmio_reg_read_request_destroy(ena_dev);
1943e457bc70SRafal Kozik 
1944e457bc70SRafal Kozik 	adapter->state = ENA_ADAPTER_STATE_FREE;
1945e457bc70SRafal Kozik }
1946e457bc70SRafal Kozik 
1947e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
1948e457bc70SRafal Kozik {
1949e457bc70SRafal Kozik 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1950e457bc70SRafal Kozik 		return 0;
1951e457bc70SRafal Kozik 
1952e457bc70SRafal Kozik 	ena_destroy_device(eth_dev);
1953e457bc70SRafal Kozik 
1954eb0ef49dSMichal Krawczyk 	return 0;
1955eb0ef49dSMichal Krawczyk }
1956eb0ef49dSMichal Krawczyk 
19571173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
19581173fca2SJan Medala {
1959890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
19607369f88fSRafal Kozik 
19611173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
19621173fca2SJan Medala 
1963a4996bd8SWei Dai 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
1964a4996bd8SWei Dai 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
19651173fca2SJan Medala 	return 0;
19661173fca2SJan Medala }
19671173fca2SJan Medala 
196833dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
196933dde075SMichal Krawczyk 			   bool disable_meta_caching)
19701173fca2SJan Medala {
19715920d930SMichal Krawczyk 	size_t i;
19721173fca2SJan Medala 
19735920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19741173fca2SJan Medala 		struct ena_ring *ring = &adapter->tx_ring[i];
19751173fca2SJan Medala 
19761173fca2SJan Medala 		ring->configured = 0;
19771173fca2SJan Medala 		ring->type = ENA_RING_TYPE_TX;
19781173fca2SJan Medala 		ring->adapter = adapter;
19791173fca2SJan Medala 		ring->id = i;
19801173fca2SJan Medala 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
19811173fca2SJan Medala 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
19822061fe41SRafal Kozik 		ring->sgl_size = adapter->max_tx_sgl_size;
198333dde075SMichal Krawczyk 		ring->disable_meta_caching = disable_meta_caching;
19841173fca2SJan Medala 	}
19851173fca2SJan Medala 
19865920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19871173fca2SJan Medala 		struct ena_ring *ring = &adapter->rx_ring[i];
19881173fca2SJan Medala 
19891173fca2SJan Medala 		ring->configured = 0;
19901173fca2SJan Medala 		ring->type = ENA_RING_TYPE_RX;
19911173fca2SJan Medala 		ring->adapter = adapter;
19921173fca2SJan Medala 		ring->id = i;
1993ea93d37eSRafal Kozik 		ring->sgl_size = adapter->max_rx_sgl_size;
19941173fca2SJan Medala 	}
19951173fca2SJan Medala }
19961173fca2SJan Medala 
1997bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
19981173fca2SJan Medala 			  struct rte_eth_dev_info *dev_info)
19991173fca2SJan Medala {
20001173fca2SJan Medala 	struct ena_adapter *adapter;
20011173fca2SJan Medala 	struct ena_com_dev *ena_dev;
200256b8b9b7SRafal Kozik 	uint64_t rx_feat = 0, tx_feat = 0;
20031173fca2SJan Medala 
2004498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2005498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2006890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
20071173fca2SJan Medala 
20081173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
2009498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
20101173fca2SJan Medala 
2011e274f573SMarc Sune 	dev_info->speed_capa =
2012e274f573SMarc Sune 			ETH_LINK_SPEED_1G   |
2013e274f573SMarc Sune 			ETH_LINK_SPEED_2_5G |
2014e274f573SMarc Sune 			ETH_LINK_SPEED_5G   |
2015e274f573SMarc Sune 			ETH_LINK_SPEED_10G  |
2016e274f573SMarc Sune 			ETH_LINK_SPEED_25G  |
2017e274f573SMarc Sune 			ETH_LINK_SPEED_40G  |
2018b2feed01SThomas Monjalon 			ETH_LINK_SPEED_50G  |
2019b2feed01SThomas Monjalon 			ETH_LINK_SPEED_100G;
2020e274f573SMarc Sune 
20211173fca2SJan Medala 	/* Set Tx & Rx features available for device */
2022117ba4a6SMichal Krawczyk 	if (adapter->offloads.tso4_supported)
20231173fca2SJan Medala 		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
20241173fca2SJan Medala 
2025117ba4a6SMichal Krawczyk 	if (adapter->offloads.tx_csum_supported)
20261173fca2SJan Medala 		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
20271173fca2SJan Medala 			DEV_TX_OFFLOAD_UDP_CKSUM |
20281173fca2SJan Medala 			DEV_TX_OFFLOAD_TCP_CKSUM;
20291173fca2SJan Medala 
2030117ba4a6SMichal Krawczyk 	if (adapter->offloads.rx_csum_supported)
20311173fca2SJan Medala 		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
20321173fca2SJan Medala 			DEV_RX_OFFLOAD_UDP_CKSUM  |
20331173fca2SJan Medala 			DEV_RX_OFFLOAD_TCP_CKSUM;
20341173fca2SJan Medala 
2035a0a4ff40SRafal Kozik 	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2036a0a4ff40SRafal Kozik 
20371173fca2SJan Medala 	/* Inform framework about available features */
20381173fca2SJan Medala 	dev_info->rx_offload_capa = rx_feat;
20397369f88fSRafal Kozik 	dev_info->rx_queue_offload_capa = rx_feat;
20401173fca2SJan Medala 	dev_info->tx_offload_capa = tx_feat;
204156b8b9b7SRafal Kozik 	dev_info->tx_queue_offload_capa = tx_feat;
20421173fca2SJan Medala 
2043b01ead20SRafal Kozik 	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
2044b01ead20SRafal Kozik 					   ETH_RSS_UDP;
2045b01ead20SRafal Kozik 
20461173fca2SJan Medala 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
20471173fca2SJan Medala 	dev_info->max_rx_pktlen  = adapter->max_mtu;
20481173fca2SJan Medala 	dev_info->max_mac_addrs = 1;
20491173fca2SJan Medala 
20505920d930SMichal Krawczyk 	dev_info->max_rx_queues = adapter->max_num_io_queues;
20515920d930SMichal Krawczyk 	dev_info->max_tx_queues = adapter->max_num_io_queues;
20521173fca2SJan Medala 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
205356b8b9b7SRafal Kozik 
205456b8b9b7SRafal Kozik 	adapter->tx_supported_offloads = tx_feat;
20557369f88fSRafal Kozik 	adapter->rx_supported_offloads = rx_feat;
205692680dc2SRafal Kozik 
20575920d930SMichal Krawczyk 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
205892680dc2SRafal Kozik 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2059ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2060ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
2061ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2062ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
206392680dc2SRafal Kozik 
20645920d930SMichal Krawczyk 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
206592680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
206692680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2067ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
206892680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2069ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
2070bdad90d1SIvan Ilchenko 
2071bdad90d1SIvan Ilchenko 	return 0;
20721173fca2SJan Medala }
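
/*
 * A minimal application-side sketch querying the capabilities reported
 * above, e.g. to decide whether TSO can be enabled on a TX queue:
 */
static __rte_unused int
ena_example_tso_supported(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	return (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0;
}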
20731173fca2SJan Medala 
20741be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
20751be097dcSMichal Krawczyk {
20761be097dcSMichal Krawczyk 	mbuf->data_len = len;
20771be097dcSMichal Krawczyk 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
20781be097dcSMichal Krawczyk 	mbuf->refcnt = 1;
20791be097dcSMichal Krawczyk 	mbuf->next = NULL;
20801be097dcSMichal Krawczyk }
20811be097dcSMichal Krawczyk 
20821be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
20831be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
20841be097dcSMichal Krawczyk 				    uint32_t descs,
20851be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
20861be097dcSMichal Krawczyk 				    uint8_t offset)
20871be097dcSMichal Krawczyk {
20881be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
20891be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf_head;
20901be097dcSMichal Krawczyk 	struct ena_rx_buffer *rx_info;
209183fd97b2SMichal Krawczyk 	int rc;
20921be097dcSMichal Krawczyk 	uint16_t ntc, len, req_id, buf = 0;
20931be097dcSMichal Krawczyk 
20941be097dcSMichal Krawczyk 	if (unlikely(descs == 0))
20951be097dcSMichal Krawczyk 		return NULL;
20961be097dcSMichal Krawczyk 
20971be097dcSMichal Krawczyk 	ntc = *next_to_clean;
20981be097dcSMichal Krawczyk 
20991be097dcSMichal Krawczyk 	len = ena_bufs[buf].len;
21001be097dcSMichal Krawczyk 	req_id = ena_bufs[buf].req_id;
21011be097dcSMichal Krawczyk 
21021be097dcSMichal Krawczyk 	rx_info = &rx_ring->rx_buffer_info[req_id];
21031be097dcSMichal Krawczyk 
21041be097dcSMichal Krawczyk 	mbuf = rx_info->mbuf;
21051be097dcSMichal Krawczyk 	RTE_ASSERT(mbuf != NULL);
21061be097dcSMichal Krawczyk 
21071be097dcSMichal Krawczyk 	ena_init_rx_mbuf(mbuf, len);
21081be097dcSMichal Krawczyk 
21091be097dcSMichal Krawczyk 	/* Fill the mbuf head with the data specific to the 1st segment. */
21101be097dcSMichal Krawczyk 	mbuf_head = mbuf;
21111be097dcSMichal Krawczyk 	mbuf_head->nb_segs = descs;
21121be097dcSMichal Krawczyk 	mbuf_head->port = rx_ring->port_id;
21131be097dcSMichal Krawczyk 	mbuf_head->pkt_len = len;
21141be097dcSMichal Krawczyk 	mbuf_head->data_off += offset;
21151be097dcSMichal Krawczyk 
21161be097dcSMichal Krawczyk 	rx_info->mbuf = NULL;
2117c0006061SMichal Krawczyk 	rx_ring->empty_rx_reqs[ntc] = req_id;
2118c0006061SMichal Krawczyk 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
21191be097dcSMichal Krawczyk 
21201be097dcSMichal Krawczyk 	while (--descs) {
21211be097dcSMichal Krawczyk 		++buf;
21221be097dcSMichal Krawczyk 		len = ena_bufs[buf].len;
21231be097dcSMichal Krawczyk 		req_id = ena_bufs[buf].req_id;
21241be097dcSMichal Krawczyk 
21251be097dcSMichal Krawczyk 		rx_info = &rx_ring->rx_buffer_info[req_id];
21261be097dcSMichal Krawczyk 		RTE_ASSERT(rx_info->mbuf != NULL);
21271be097dcSMichal Krawczyk 
212883fd97b2SMichal Krawczyk 		if (unlikely(len == 0)) {
212983fd97b2SMichal Krawczyk 			/*
213083fd97b2SMichal Krawczyk 			 * Some devices can pass a descriptor with length 0.
213183fd97b2SMichal Krawczyk 			 * To avoid confusion, the PMD simply puts such a
213283fd97b2SMichal Krawczyk 			 * descriptor back, as it was never used. This way an
213383fd97b2SMichal Krawczyk 			 * mbuf allocation is avoided.
213483fd97b2SMichal Krawczyk 			 */
213583fd97b2SMichal Krawczyk 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
213683fd97b2SMichal Krawczyk 				rx_info->mbuf, req_id);
213783fd97b2SMichal Krawczyk 			if (unlikely(rc != 0)) {
213883fd97b2SMichal Krawczyk 				/* Free the mbuf in case of an error. */
213983fd97b2SMichal Krawczyk 				rte_mbuf_raw_free(rx_info->mbuf);
214083fd97b2SMichal Krawczyk 			} else {
214183fd97b2SMichal Krawczyk 				/*
214283fd97b2SMichal Krawczyk 				 * If there was no error, just exit the loop as
214383fd97b2SMichal Krawczyk 				 * 0 length descriptor is always the last one.
214483fd97b2SMichal Krawczyk 				 */
214583fd97b2SMichal Krawczyk 				break;
214683fd97b2SMichal Krawczyk 			}
214783fd97b2SMichal Krawczyk 		} else {
21481be097dcSMichal Krawczyk 			/* Create an mbuf chain. */
21491be097dcSMichal Krawczyk 			mbuf->next = rx_info->mbuf;
21501be097dcSMichal Krawczyk 			mbuf = mbuf->next;
21511be097dcSMichal Krawczyk 
21521be097dcSMichal Krawczyk 			ena_init_rx_mbuf(mbuf, len);
21531be097dcSMichal Krawczyk 			mbuf_head->pkt_len += len;
215483fd97b2SMichal Krawczyk 		}
21551be097dcSMichal Krawczyk 
215683fd97b2SMichal Krawczyk 		/*
215783fd97b2SMichal Krawczyk 		 * Mark the descriptor as depleted and perform necessary
215883fd97b2SMichal Krawczyk 		 * cleanup.
215983fd97b2SMichal Krawczyk 		 * This code will execute in two cases:
216083fd97b2SMichal Krawczyk 		 *  1. Descriptor len was greater than 0 - normal situation.
216183fd97b2SMichal Krawczyk 		 *  2. Descriptor len was 0 and we failed to add the descriptor
216283fd97b2SMichal Krawczyk 		 *     to the device. In that situation, we should try to add
216383fd97b2SMichal Krawczyk 		 *     the mbuf again in the populate routine and mark the
216483fd97b2SMichal Krawczyk 		 *     descriptor as used up by the device.
216583fd97b2SMichal Krawczyk 		 */
21661be097dcSMichal Krawczyk 		rx_info->mbuf = NULL;
2167c0006061SMichal Krawczyk 		rx_ring->empty_rx_reqs[ntc] = req_id;
2168c0006061SMichal Krawczyk 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
21691be097dcSMichal Krawczyk 	}
21701be097dcSMichal Krawczyk 
21711be097dcSMichal Krawczyk 	*next_to_clean = ntc;
21721be097dcSMichal Krawczyk 
21731be097dcSMichal Krawczyk 	return mbuf_head;
21741be097dcSMichal Krawczyk }
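
/*
 * A worked example of the chain assembly above (numbers are illustrative):
 * for a completion with descs = 3 and buffer lengths 1500, 1500 and 200,
 * the head mbuf gets nb_segs = 3 and data_len = 1500, the two chained mbufs
 * get data_len = 1500 and 200, and pkt_len accumulates to 3200 on the head.
 * The HW-provided pkt_offset is applied to the head's data_off only.
 */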
21751be097dcSMichal Krawczyk 
21761173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
21771173fca2SJan Medala 				  uint16_t nb_pkts)
21781173fca2SJan Medala {
21791173fca2SJan Medala 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
218077550607SMichal Krawczyk 	unsigned int free_queue_entries;
218177550607SMichal Krawczyk 	unsigned int refill_threshold;
21821173fca2SJan Medala 	uint16_t next_to_clean = rx_ring->next_to_clean;
218374456796SMichal Krawczyk 	uint16_t descs_in_use;
21841be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
21851be097dcSMichal Krawczyk 	uint16_t completed;
21861173fca2SJan Medala 	struct ena_com_rx_ctx ena_rx_ctx;
21871be097dcSMichal Krawczyk 	int i, rc = 0;
21881173fca2SJan Medala 
21891173fca2SJan Medala 	/* Check adapter state */
21901173fca2SJan Medala 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
21916f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
21921173fca2SJan Medala 			"Trying to receive pkts while device is NOT running\n");
21931173fca2SJan Medala 		return 0;
21941173fca2SJan Medala 	}
21951173fca2SJan Medala 
2196c0006061SMichal Krawczyk 	descs_in_use = rx_ring->ring_size -
219774456796SMichal Krawczyk 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
219874456796SMichal Krawczyk 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
21991173fca2SJan Medala 
22001173fca2SJan Medala 	for (completed = 0; completed < nb_pkts; completed++) {
2201ea93d37eSRafal Kozik 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
22021173fca2SJan Medala 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
22031173fca2SJan Medala 		ena_rx_ctx.descs = 0;
22047b3a3c4bSMaciej Bielski 		ena_rx_ctx.pkt_offset = 0;
22051173fca2SJan Medala 		/* receive packet context */
22061173fca2SJan Medala 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
22071173fca2SJan Medala 				    rx_ring->ena_com_io_sq,
22081173fca2SJan Medala 				    &ena_rx_ctx);
22091173fca2SJan Medala 		if (unlikely(rc)) {
22106f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc);
221105cffdcfSMichal Krawczyk 			if (rc == ENA_COM_NO_SPACE) {
221205cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_desc_num;
22139b260dbfSRafal Kozik 				rx_ring->adapter->reset_reason =
22149b260dbfSRafal Kozik 					ENA_REGS_RESET_TOO_MANY_RX_DESCS;
221505cffdcfSMichal Krawczyk 			} else {
221605cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_req_id;
221705cffdcfSMichal Krawczyk 				rx_ring->adapter->reset_reason =
221805cffdcfSMichal Krawczyk 					ENA_REGS_RESET_INV_RX_REQ_ID;
221905cffdcfSMichal Krawczyk 			}
2220241da076SRafal Kozik 			rx_ring->adapter->trigger_reset = true;
22211173fca2SJan Medala 			return 0;
22221173fca2SJan Medala 		}
22231173fca2SJan Medala 
22241be097dcSMichal Krawczyk 		mbuf = ena_rx_mbuf(rx_ring,
22251be097dcSMichal Krawczyk 			ena_rx_ctx.ena_bufs,
22261be097dcSMichal Krawczyk 			ena_rx_ctx.descs,
22271be097dcSMichal Krawczyk 			&next_to_clean,
22281be097dcSMichal Krawczyk 			ena_rx_ctx.pkt_offset);
22291be097dcSMichal Krawczyk 		if (unlikely(mbuf == NULL)) {
22301be097dcSMichal Krawczyk 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2231c0006061SMichal Krawczyk 				rx_ring->empty_rx_reqs[next_to_clean] =
22321be097dcSMichal Krawczyk 					rx_ring->ena_bufs[i].req_id;
2233c0006061SMichal Krawczyk 				next_to_clean = ENA_IDX_NEXT_MASKED(
2234c0006061SMichal Krawczyk 					next_to_clean, rx_ring->size_mask);
22351173fca2SJan Medala 			}
2236f00930d9SRafal Kozik 			break;
22371be097dcSMichal Krawczyk 		}
22381173fca2SJan Medala 
22391173fca2SJan Medala 		/* fill mbuf attributes if any */
22401be097dcSMichal Krawczyk 		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx);
22417830e905SSolganik Alexander 
22421be097dcSMichal Krawczyk 		if (unlikely(mbuf->ol_flags &
2243ef74b5f7SMichal Krawczyk 				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
2244ef74b5f7SMichal Krawczyk 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
22457830e905SSolganik Alexander 			++rx_ring->rx_stats.bad_csum;
2246ef74b5f7SMichal Krawczyk 		}
22477830e905SSolganik Alexander 
22481be097dcSMichal Krawczyk 		mbuf->hash.rss = ena_rx_ctx.hash;
22491173fca2SJan Medala 
22501be097dcSMichal Krawczyk 		rx_pkts[completed] = mbuf;
22511be097dcSMichal Krawczyk 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
22521173fca2SJan Medala 	}
22531173fca2SJan Medala 
22541be097dcSMichal Krawczyk 	rx_ring->rx_stats.cnt += completed;
2255ec78af6bSMichal Krawczyk 	rx_ring->next_to_clean = next_to_clean;
2256ec78af6bSMichal Krawczyk 
225777550607SMichal Krawczyk 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
225877550607SMichal Krawczyk 	refill_threshold =
2259c0006061SMichal Krawczyk 		RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
226077550607SMichal Krawczyk 		(unsigned int)ENA_REFILL_THRESH_PACKET);
226177550607SMichal Krawczyk 
22621173fca2SJan Medala 	/* Burst refill to save doorbells, memory barriers, const interval */
226377550607SMichal Krawczyk 	if (free_queue_entries > refill_threshold) {
2264a45462c5SRafal Kozik 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
226577550607SMichal Krawczyk 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2266a45462c5SRafal Kozik 	}
22671173fca2SJan Medala 
22681be097dcSMichal Krawczyk 	return completed;
22691173fca2SJan Medala }
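
/*
 * This routine is the driver's Rx burst function and is reached through the
 * generic rte_eth_rx_burst() API. A minimal polling loop, sketched under the
 * assumption that the port is already configured and started (port_id and
 * BURST are placeholders):
 *
 *	struct rte_mbuf *pkts[BURST];
 *	uint16_t i, nb_rx;
 *
 *	for (;;) {
 *		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST);
 *		for (i = 0; i < nb_rx; i++)
 *			rte_pktmbuf_free(pkts[i]); // process, then free
 *	}
 */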
22701173fca2SJan Medala 
2271b3fc5a1aSKonstantin Ananyev static uint16_t
227283277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2273b3fc5a1aSKonstantin Ananyev 		uint16_t nb_pkts)
2274b3fc5a1aSKonstantin Ananyev {
2275b3fc5a1aSKonstantin Ananyev 	int32_t ret;
2276b3fc5a1aSKonstantin Ananyev 	uint32_t i;
2277b3fc5a1aSKonstantin Ananyev 	struct rte_mbuf *m;
227883277a7cSJakub Palider 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2279a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ip_hdr;
2280b3fc5a1aSKonstantin Ananyev 	uint64_t ol_flags;
228183277a7cSJakub Palider 	uint16_t frag_field;
228283277a7cSJakub Palider 
2283b3fc5a1aSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
2284b3fc5a1aSKonstantin Ananyev 		m = tx_pkts[i];
2285b3fc5a1aSKonstantin Ananyev 		ol_flags = m->ol_flags;
2286b3fc5a1aSKonstantin Ananyev 
2287bc5ef57dSMichal Krawczyk 		if (!(ol_flags & PKT_TX_IPV4))
2288bc5ef57dSMichal Krawczyk 			continue;
2289bc5ef57dSMichal Krawczyk 
2290bc5ef57dSMichal Krawczyk 		/* If the L2 header length was not specified, assume it is
2291bc5ef57dSMichal Krawczyk 		 * the length of the Ethernet header.
2292bc5ef57dSMichal Krawczyk 		 */
2293bc5ef57dSMichal Krawczyk 		if (unlikely(m->l2_len == 0))
22946d13ea8eSOlivier Matz 			m->l2_len = sizeof(struct rte_ether_hdr);
2295bc5ef57dSMichal Krawczyk 
2296a7c528e5SOlivier Matz 		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2297bc5ef57dSMichal Krawczyk 						 m->l2_len);
2298bc5ef57dSMichal Krawczyk 		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2299bc5ef57dSMichal Krawczyk 
230024ac604eSOlivier Matz 		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
2301bc5ef57dSMichal Krawczyk 			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2302bc5ef57dSMichal Krawczyk 
2303bc5ef57dSMichal Krawczyk 			/* If the IPv4 header has the DF flag enabled and TSO
2304bc5ef57dSMichal Krawczyk 			 * support is disabled, the partial checksum should
2305bc5ef57dSMichal Krawczyk 			 * not be calculated.
2305bc5ef57dSMichal Krawczyk 			 */
2306117ba4a6SMichal Krawczyk 			if (!tx_ring->adapter->offloads.tso4_supported)
2307bc5ef57dSMichal Krawczyk 				continue;
2308bc5ef57dSMichal Krawczyk 		}
2309bc5ef57dSMichal Krawczyk 
2310b3fc5a1aSKonstantin Ananyev 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
2311b3fc5a1aSKonstantin Ananyev 				(ol_flags & PKT_TX_L4_MASK) ==
2312b3fc5a1aSKonstantin Ananyev 				PKT_TX_SCTP_CKSUM) {
2313baeed5f4SMichal Krawczyk 			rte_errno = ENOTSUP;
2314b3fc5a1aSKonstantin Ananyev 			return i;
2315b3fc5a1aSKonstantin Ananyev 		}
2316b3fc5a1aSKonstantin Ananyev 
2317b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2318b3fc5a1aSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
2319b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2320baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2321b3fc5a1aSKonstantin Ananyev 			return i;
2322b3fc5a1aSKonstantin Ananyev 		}
2323b3fc5a1aSKonstantin Ananyev #endif
232483277a7cSJakub Palider 
232583277a7cSJakub Palider 		/* When TSO is requested and DF is not set (DF=0), the
232683277a7cSJakub Palider 		 * hardware must be provided with a partial checksum;
232783277a7cSJakub Palider 		 * otherwise, it will take care of the necessary calculations.
232883277a7cSJakub Palider 		 */
232983277a7cSJakub Palider 
2330b3fc5a1aSKonstantin Ananyev 		ret = rte_net_intel_cksum_flags_prepare(m,
2331b3fc5a1aSKonstantin Ananyev 			ol_flags & ~PKT_TX_TCP_SEG);
2332b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2333baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2334b3fc5a1aSKonstantin Ananyev 			return i;
2335b3fc5a1aSKonstantin Ananyev 		}
2336b3fc5a1aSKonstantin Ananyev 	}
2337b3fc5a1aSKonstantin Ananyev 
2338b3fc5a1aSKonstantin Ananyev 	return i;
2339b3fc5a1aSKonstantin Ananyev }
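
/*
 * This routine backs rte_eth_tx_prepare() for the port. An illustrative
 * sketch of the intended call order for offloaded packets (assumes pkts[]
 * carries proper l2_len/l3_len and ol_flags; handle_invalid_pkt() is a
 * hypothetical application helper):
 *
 *	uint16_t nb_prep, nb_tx;
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_pkts);
 *	if (nb_prep != nb_pkts)
 *		handle_invalid_pkt(pkts[nb_prep], rte_errno);
 *	nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
 */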
2340b3fc5a1aSKonstantin Ananyev 
2341f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter,
2342f01f060cSRafal Kozik 			     struct ena_admin_ena_hw_hints *hints)
2343f01f060cSRafal Kozik {
2344f01f060cSRafal Kozik 	if (hints->admin_completion_tx_timeout)
2345f01f060cSRafal Kozik 		adapter->ena_dev.admin_queue.completion_timeout =
2346f01f060cSRafal Kozik 			hints->admin_completion_tx_timeout * 1000;
2347f01f060cSRafal Kozik 
2348f01f060cSRafal Kozik 	if (hints->mmio_read_timeout)
2349f01f060cSRafal Kozik 		/* convert to usec */
2350f01f060cSRafal Kozik 		adapter->ena_dev.mmio_read.reg_read_to =
2351f01f060cSRafal Kozik 			hints->mmio_read_timeout * 1000;
2352d9b8b106SMichal Krawczyk 
2353d9b8b106SMichal Krawczyk 	if (hints->driver_watchdog_timeout) {
2354d9b8b106SMichal Krawczyk 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2355d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2356d9b8b106SMichal Krawczyk 		else
2357d9b8b106SMichal Krawczyk 			/* Convert msecs to ticks. */
2358d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout =
2359d9b8b106SMichal Krawczyk 				(hints->driver_watchdog_timeout *
2360d9b8b106SMichal Krawczyk 				rte_get_timer_hz()) / 1000;
2361d9b8b106SMichal Krawczyk 	}
2362f01f060cSRafal Kozik }
2363f01f060cSRafal Kozik 
23648a90f3d8SIgor Chauskin static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
23652061fe41SRafal Kozik 					      struct rte_mbuf *mbuf)
23662061fe41SRafal Kozik {
23672fca2a98SMichal Krawczyk 	struct ena_com_dev *ena_dev;
23682fca2a98SMichal Krawczyk 	int num_segments, header_len, rc;
23692061fe41SRafal Kozik 
23702fca2a98SMichal Krawczyk 	ena_dev = &tx_ring->adapter->ena_dev;
23712061fe41SRafal Kozik 	num_segments = mbuf->nb_segs;
23722fca2a98SMichal Krawczyk 	header_len = mbuf->data_len;
23732061fe41SRafal Kozik 
23742061fe41SRafal Kozik 	if (likely(num_segments < tx_ring->sgl_size))
23758a90f3d8SIgor Chauskin 		goto checkspace;
23762061fe41SRafal Kozik 
23772fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
23782fca2a98SMichal Krawczyk 	    (num_segments == tx_ring->sgl_size) &&
23792fca2a98SMichal Krawczyk 	    (header_len < tx_ring->tx_max_header_size))
23808a90f3d8SIgor Chauskin 		goto checkspace;
23812fca2a98SMichal Krawczyk 
23828a90f3d8SIgor Chauskin 	/* Check for space for 2 additional descriptors due to a possible
23838a90f3d8SIgor Chauskin 	 * header split and the metadata descriptor. Linearization will be
23848a90f3d8SIgor Chauskin 	 * needed, so the segment count is reduced from num_segments to 1.
23858a90f3d8SIgor Chauskin 	 */
23868a90f3d8SIgor Chauskin 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
23878a90f3d8SIgor Chauskin 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
23888a90f3d8SIgor Chauskin 		return ENA_COM_NO_MEM;
23898a90f3d8SIgor Chauskin 	}
23907830e905SSolganik Alexander 	++tx_ring->tx_stats.linearize;
23912061fe41SRafal Kozik 	rc = rte_pktmbuf_linearize(mbuf);
23927830e905SSolganik Alexander 	if (unlikely(rc)) {
23936f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
23947830e905SSolganik Alexander 		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
23957830e905SSolganik Alexander 		++tx_ring->tx_stats.linearize_failed;
23967830e905SSolganik Alexander 		return rc;
23977830e905SSolganik Alexander 	}
23982061fe41SRafal Kozik 
23998a90f3d8SIgor Chauskin 	return 0;
24008a90f3d8SIgor Chauskin 
24018a90f3d8SIgor Chauskin checkspace:
24028a90f3d8SIgor Chauskin 	/* Check for space for 2 additional descriptors due to a possible
24038a90f3d8SIgor Chauskin 	 * header split and the metadata descriptor.
24048a90f3d8SIgor Chauskin 	 */
24058a90f3d8SIgor Chauskin 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
24068a90f3d8SIgor Chauskin 					  num_segments + 2)) {
24078a90f3d8SIgor Chauskin 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
24088a90f3d8SIgor Chauskin 		return ENA_COM_NO_MEM;
24098a90f3d8SIgor Chauskin 	}
24108a90f3d8SIgor Chauskin 
24118a90f3d8SIgor Chauskin 	return 0;
24122061fe41SRafal Kozik }
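
/*
 * The descriptor accounting above, spelled out: a packet with N data
 * segments may need up to N + 2 SQ entries - one per segment, plus the
 * metadata descriptor and, for LLQ, one more in case the header is split
 * from the first buffer. After linearization the packet occupies a single
 * segment, hence the fixed check for 1 + 2 = 3 entries.
 */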
24132061fe41SRafal Kozik 
241436278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
241536278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info,
241636278b82SMichal Krawczyk 	struct rte_mbuf *mbuf,
241736278b82SMichal Krawczyk 	void **push_header,
241836278b82SMichal Krawczyk 	uint16_t *header_len)
241936278b82SMichal Krawczyk {
242036278b82SMichal Krawczyk 	struct ena_com_buf *ena_buf;
242136278b82SMichal Krawczyk 	uint16_t delta, seg_len, push_len;
242236278b82SMichal Krawczyk 
242336278b82SMichal Krawczyk 	delta = 0;
242436278b82SMichal Krawczyk 	seg_len = mbuf->data_len;
242536278b82SMichal Krawczyk 
242636278b82SMichal Krawczyk 	tx_info->mbuf = mbuf;
242736278b82SMichal Krawczyk 	ena_buf = tx_info->bufs;
242836278b82SMichal Krawczyk 
242936278b82SMichal Krawczyk 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
243036278b82SMichal Krawczyk 		/*
243136278b82SMichal Krawczyk 		 * Tx header might be (and will be in most cases) smaller than
243236278b82SMichal Krawczyk 		 * The Tx header might be (and in most cases will be) smaller
243336278b82SMichal Krawczyk 		 * than tx_max_header_size. It is not an issue to send more
243436278b82SMichal Krawczyk 		 * data to the device than actually needed if the mbuf size
243536278b82SMichal Krawczyk 		 * is greater than tx_max_header_size.
243636278b82SMichal Krawczyk 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
243736278b82SMichal Krawczyk 		*header_len = push_len;
243836278b82SMichal Krawczyk 
243936278b82SMichal Krawczyk 		if (likely(push_len <= seg_len)) {
244036278b82SMichal Krawczyk 			/* If the push header is in the single segment, then
244136278b82SMichal Krawczyk 			 * just point it to the 1st mbuf data.
244236278b82SMichal Krawczyk 			 */
244336278b82SMichal Krawczyk 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
244436278b82SMichal Krawczyk 		} else {
244536278b82SMichal Krawczyk 			/* If the push header lays in the several segments, copy
244636278b82SMichal Krawczyk 			/* If the push header spans several segments, copy
244736278b82SMichal Krawczyk 			 */
244836278b82SMichal Krawczyk 			rte_pktmbuf_read(mbuf, 0, push_len,
244936278b82SMichal Krawczyk 				tx_ring->push_buf_intermediate_buf);
245036278b82SMichal Krawczyk 			*push_header = tx_ring->push_buf_intermediate_buf;
245136278b82SMichal Krawczyk 			delta = push_len - seg_len;
245236278b82SMichal Krawczyk 		}
245336278b82SMichal Krawczyk 	} else {
245436278b82SMichal Krawczyk 		*push_header = NULL;
245536278b82SMichal Krawczyk 		*header_len = 0;
245636278b82SMichal Krawczyk 		push_len = 0;
245736278b82SMichal Krawczyk 	}
245836278b82SMichal Krawczyk 
245936278b82SMichal Krawczyk 	/* Process first segment taking into consideration pushed header */
246036278b82SMichal Krawczyk 	if (seg_len > push_len) {
246136278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova +
246236278b82SMichal Krawczyk 				mbuf->data_off +
246336278b82SMichal Krawczyk 				push_len;
246436278b82SMichal Krawczyk 		ena_buf->len = seg_len - push_len;
246536278b82SMichal Krawczyk 		ena_buf++;
246636278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
246736278b82SMichal Krawczyk 	}
246836278b82SMichal Krawczyk 
246936278b82SMichal Krawczyk 	while ((mbuf = mbuf->next) != NULL) {
247036278b82SMichal Krawczyk 		seg_len = mbuf->data_len;
247136278b82SMichal Krawczyk 
247236278b82SMichal Krawczyk 		/* Skip mbufs if whole data is pushed as a header */
247336278b82SMichal Krawczyk 		if (unlikely(delta > seg_len)) {
247436278b82SMichal Krawczyk 			delta -= seg_len;
247536278b82SMichal Krawczyk 			continue;
247636278b82SMichal Krawczyk 		}
247736278b82SMichal Krawczyk 
247836278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
247936278b82SMichal Krawczyk 		ena_buf->len = seg_len - delta;
248036278b82SMichal Krawczyk 		ena_buf++;
248136278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
248236278b82SMichal Krawczyk 
248336278b82SMichal Krawczyk 		delta = 0;
248436278b82SMichal Krawczyk 	}
248536278b82SMichal Krawczyk }
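
/*
 * A worked example of the LLQ push/delta arithmetic above (numbers are
 * illustrative; tx_max_header_size = 96 is an assumption): for pkt_len = 200
 * split into segments of 64 and 136 bytes, push_len = min(200, 96) = 96.
 * Since 96 > 64, the header is copied into the intermediate buffer and
 * delta = 96 - 64 = 32. The first segment maps no descriptor (seg_len <=
 * push_len) and the second maps one with paddr advanced by 32 and
 * len = 136 - 32 = 104, so 96 pushed + 104 mapped = 200 bytes in total.
 */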
248636278b82SMichal Krawczyk 
248736278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
248836278b82SMichal Krawczyk {
248936278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info;
249036278b82SMichal Krawczyk 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
249136278b82SMichal Krawczyk 	uint16_t next_to_use;
249236278b82SMichal Krawczyk 	uint16_t header_len;
249336278b82SMichal Krawczyk 	uint16_t req_id;
249436278b82SMichal Krawczyk 	void *push_header;
249536278b82SMichal Krawczyk 	int nb_hw_desc;
249636278b82SMichal Krawczyk 	int rc;
249736278b82SMichal Krawczyk 
24988a90f3d8SIgor Chauskin 	rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf);
249936278b82SMichal Krawczyk 	if (unlikely(rc))
250036278b82SMichal Krawczyk 		return rc;
250136278b82SMichal Krawczyk 
250236278b82SMichal Krawczyk 	next_to_use = tx_ring->next_to_use;
250336278b82SMichal Krawczyk 
250436278b82SMichal Krawczyk 	req_id = tx_ring->empty_tx_reqs[next_to_use];
250536278b82SMichal Krawczyk 	tx_info = &tx_ring->tx_buffer_info[req_id];
250636278b82SMichal Krawczyk 	tx_info->num_of_bufs = 0;
250736278b82SMichal Krawczyk 
250836278b82SMichal Krawczyk 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
250936278b82SMichal Krawczyk 
251036278b82SMichal Krawczyk 	ena_tx_ctx.ena_bufs = tx_info->bufs;
251136278b82SMichal Krawczyk 	ena_tx_ctx.push_header = push_header;
251236278b82SMichal Krawczyk 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
251336278b82SMichal Krawczyk 	ena_tx_ctx.req_id = req_id;
251436278b82SMichal Krawczyk 	ena_tx_ctx.header_len = header_len;
251536278b82SMichal Krawczyk 
251636278b82SMichal Krawczyk 	/* Set Tx offloads flags, if applicable */
251736278b82SMichal Krawczyk 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
251836278b82SMichal Krawczyk 		tx_ring->disable_meta_caching);
251936278b82SMichal Krawczyk 
252036278b82SMichal Krawczyk 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
252136278b82SMichal Krawczyk 			&ena_tx_ctx))) {
252236278b82SMichal Krawczyk 		PMD_DRV_LOG(DEBUG,
252336278b82SMichal Krawczyk 			"LLQ Tx max burst size of queue %d achieved, writing doorbell to send the burst\n",
252436278b82SMichal Krawczyk 			tx_ring->id);
252536278b82SMichal Krawczyk 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
25261f949ad9SAmit Bernstein 		tx_ring->tx_stats.doorbells++;
25271d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = false;
252836278b82SMichal Krawczyk 	}
252936278b82SMichal Krawczyk 
253036278b82SMichal Krawczyk 	/* prepare the packet's descriptors to dma engine */
253136278b82SMichal Krawczyk 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,	&ena_tx_ctx,
253236278b82SMichal Krawczyk 		&nb_hw_desc);
253336278b82SMichal Krawczyk 	if (unlikely(rc)) {
253436278b82SMichal Krawczyk 		++tx_ring->tx_stats.prepare_ctx_err;
253536278b82SMichal Krawczyk 		return rc;
253636278b82SMichal Krawczyk 	}
253736278b82SMichal Krawczyk 
253836278b82SMichal Krawczyk 	tx_info->tx_descs = nb_hw_desc;
253936278b82SMichal Krawczyk 
254036278b82SMichal Krawczyk 	tx_ring->tx_stats.cnt++;
254136278b82SMichal Krawczyk 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
254236278b82SMichal Krawczyk 
254336278b82SMichal Krawczyk 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
254436278b82SMichal Krawczyk 		tx_ring->size_mask);
254536278b82SMichal Krawczyk 
254636278b82SMichal Krawczyk 	return 0;
254736278b82SMichal Krawczyk }
254836278b82SMichal Krawczyk 
254936278b82SMichal Krawczyk static void ena_tx_cleanup(struct ena_ring *tx_ring)
255036278b82SMichal Krawczyk {
255136278b82SMichal Krawczyk 	unsigned int cleanup_budget;
255236278b82SMichal Krawczyk 	unsigned int total_tx_descs = 0;
255336278b82SMichal Krawczyk 	uint16_t next_to_clean = tx_ring->next_to_clean;
255436278b82SMichal Krawczyk 
255536278b82SMichal Krawczyk 	cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
255636278b82SMichal Krawczyk 		(unsigned int)ENA_REFILL_THRESH_PACKET);
255736278b82SMichal Krawczyk 
255836278b82SMichal Krawczyk 	while (likely(total_tx_descs < cleanup_budget)) {
255936278b82SMichal Krawczyk 		struct rte_mbuf *mbuf;
256036278b82SMichal Krawczyk 		struct ena_tx_buffer *tx_info;
256136278b82SMichal Krawczyk 		uint16_t req_id;
256236278b82SMichal Krawczyk 
256336278b82SMichal Krawczyk 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
256436278b82SMichal Krawczyk 			break;
256536278b82SMichal Krawczyk 
256636278b82SMichal Krawczyk 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
256736278b82SMichal Krawczyk 			break;
256836278b82SMichal Krawczyk 
256936278b82SMichal Krawczyk 		/* Get Tx info & store how many descs were processed. */
257036278b82SMichal Krawczyk 		tx_info = &tx_ring->tx_buffer_info[req_id];
257136278b82SMichal Krawczyk 
257236278b82SMichal Krawczyk 		mbuf = tx_info->mbuf;
257336278b82SMichal Krawczyk 		rte_pktmbuf_free(mbuf);
257436278b82SMichal Krawczyk 
257536278b82SMichal Krawczyk 		tx_info->mbuf = NULL;
257636278b82SMichal Krawczyk 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
257736278b82SMichal Krawczyk 
257836278b82SMichal Krawczyk 		total_tx_descs += tx_info->tx_descs;
257936278b82SMichal Krawczyk 
258036278b82SMichal Krawczyk 		/* Put back descriptor to the ring for reuse */
258136278b82SMichal Krawczyk 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
258236278b82SMichal Krawczyk 			tx_ring->size_mask);
258336278b82SMichal Krawczyk 	}
258436278b82SMichal Krawczyk 
258536278b82SMichal Krawczyk 	if (likely(total_tx_descs > 0)) {
258636278b82SMichal Krawczyk 		/* acknowledge completion of sent packets */
258736278b82SMichal Krawczyk 		tx_ring->next_to_clean = next_to_clean;
258836278b82SMichal Krawczyk 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
258936278b82SMichal Krawczyk 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
259036278b82SMichal Krawczyk 	}
259136278b82SMichal Krawczyk }
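
/*
 * Example of the cleanup budget above, assuming (for illustration only)
 * ENA_REFILL_THRESH_DIVIDER = 2 and ENA_REFILL_THRESH_PACKET = 256: a
 * 1024-descriptor ring yields RTE_MIN(1024 / 2, 256) = 256, so at most 256
 * Tx descriptors are reclaimed per call before the completion head is
 * acknowledged in one shot.
 */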
259236278b82SMichal Krawczyk 
25931173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
25941173fca2SJan Medala 				  uint16_t nb_pkts)
25951173fca2SJan Medala {
25961173fca2SJan Medala 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
259774456796SMichal Krawczyk 	uint16_t sent_idx = 0;
25981173fca2SJan Medala 
25991173fca2SJan Medala 	/* Check adapter state */
26001173fca2SJan Medala 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
26016f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
26021173fca2SJan Medala 			"Trying to xmit pkts while device is NOT running\n");
26031173fca2SJan Medala 		return 0;
26041173fca2SJan Medala 	}
26051173fca2SJan Medala 
26061173fca2SJan Medala 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
260736278b82SMichal Krawczyk 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
26082061fe41SRafal Kozik 			break;
26091d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = true;
261036278b82SMichal Krawczyk 		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
261136278b82SMichal Krawczyk 			tx_ring->size_mask)]);
26122fca2a98SMichal Krawczyk 	}
26132fca2a98SMichal Krawczyk 
26147830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2615b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
26161173fca2SJan Medala 
26175e02e19eSJan Medala 	/* If there are ready packets to be xmitted... */
26181d973d8fSIgor Chauskin 	if (likely(tx_ring->pkts_without_db)) {
26195e02e19eSJan Medala 		/* ...let HW do its best :-) */
26201173fca2SJan Medala 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
262145b6d861SMichal Krawczyk 		tx_ring->tx_stats.doorbells++;
26221d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = false;
26235e02e19eSJan Medala 	}
26245e02e19eSJan Medala 
262536278b82SMichal Krawczyk 	ena_tx_cleanup(tx_ring);
2626f7d82d24SRafal Kozik 
26277830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2628b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
26297830e905SSolganik Alexander 	tx_ring->tx_stats.tx_poll++;
26307830e905SSolganik Alexander 
26311173fca2SJan Medala 	return sent_idx;
26321173fca2SJan Medala }
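
/*
 * On the application side this is reached via rte_eth_tx_burst(), which may
 * accept fewer packets than requested once the ring fills up. A common retry
 * sketch (port_id, pkts and nb_pkts are assumed to come from the caller):
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_pkts)
 *		sent += rte_eth_tx_burst(port_id, 0, pkts + sent,
 *			nb_pkts - sent);
 */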
26331173fca2SJan Medala 
263445718adaSMichal Krawczyk int ena_copy_eni_stats(struct ena_adapter *adapter)
263545718adaSMichal Krawczyk {
263645718adaSMichal Krawczyk 	struct ena_admin_eni_stats admin_eni_stats;
263745718adaSMichal Krawczyk 	int rc;
263845718adaSMichal Krawczyk 
263945718adaSMichal Krawczyk 	rte_spinlock_lock(&adapter->admin_lock);
264045718adaSMichal Krawczyk 	rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats);
264145718adaSMichal Krawczyk 	rte_spinlock_unlock(&adapter->admin_lock);
264245718adaSMichal Krawczyk 	if (rc != 0) {
264345718adaSMichal Krawczyk 		if (rc == ENA_COM_UNSUPPORTED) {
264445718adaSMichal Krawczyk 			PMD_DRV_LOG(DEBUG,
264545718adaSMichal Krawczyk 				"Retrieving ENI metrics is not supported.\n");
264645718adaSMichal Krawczyk 		} else {
264745718adaSMichal Krawczyk 			PMD_DRV_LOG(WARNING,
264845718adaSMichal Krawczyk 				"Failed to get ENI metrics: %d\n", rc);
264945718adaSMichal Krawczyk 		}
265045718adaSMichal Krawczyk 		return rc;
265145718adaSMichal Krawczyk 	}
265245718adaSMichal Krawczyk 
265345718adaSMichal Krawczyk 	rte_memcpy(&adapter->eni_stats, &admin_eni_stats,
265445718adaSMichal Krawczyk 		sizeof(struct ena_stats_eni));
265545718adaSMichal Krawczyk 
265645718adaSMichal Krawczyk 	return 0;
265745718adaSMichal Krawczyk }
265845718adaSMichal Krawczyk 
26597830e905SSolganik Alexander /**
26607830e905SSolganik Alexander  * DPDK callback to retrieve names of extended device statistics
26617830e905SSolganik Alexander  *
26627830e905SSolganik Alexander  * @param dev
26637830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
26647830e905SSolganik Alexander  * @param[out] xstats_names
26657830e905SSolganik Alexander  *   Buffer to insert names into.
26667830e905SSolganik Alexander  * @param n
26677830e905SSolganik Alexander  *   Number of entries in the xstats_names buffer.
26687830e905SSolganik Alexander  *
26697830e905SSolganik Alexander  * @return
26707830e905SSolganik Alexander  *   Number of xstats names.
26717830e905SSolganik Alexander  */
26727830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
26737830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
26747830e905SSolganik Alexander 				unsigned int n)
26757830e905SSolganik Alexander {
26767830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
26777830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
26787830e905SSolganik Alexander 
26797830e905SSolganik Alexander 	if (n < xstats_count || !xstats_names)
26807830e905SSolganik Alexander 		return xstats_count;
26817830e905SSolganik Alexander 
26827830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
26837830e905SSolganik Alexander 		strcpy(xstats_names[count].name,
26847830e905SSolganik Alexander 			ena_stats_global_strings[stat].name);
26857830e905SSolganik Alexander 
268645718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++)
268745718adaSMichal Krawczyk 		strcpy(xstats_names[count].name,
268845718adaSMichal Krawczyk 			ena_stats_eni_strings[stat].name);
268945718adaSMichal Krawczyk 
26907830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
26917830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
26927830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
26937830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
26947830e905SSolganik Alexander 				"rx_q%d_%s", i,
26957830e905SSolganik Alexander 				ena_stats_rx_strings[stat].name);
26967830e905SSolganik Alexander 
26977830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
26987830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
26997830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
27007830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
27017830e905SSolganik Alexander 				"tx_q%d_%s", i,
27027830e905SSolganik Alexander 				ena_stats_tx_strings[stat].name);
27037830e905SSolganik Alexander 
27047830e905SSolganik Alexander 	return xstats_count;
27057830e905SSolganik Alexander }
27067830e905SSolganik Alexander 
27077830e905SSolganik Alexander /**
27087830e905SSolganik Alexander  * DPDK callback to get extended device statistics.
27097830e905SSolganik Alexander  *
27107830e905SSolganik Alexander  * @param dev
27117830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
27127830e905SSolganik Alexander  * @param[out] stats
27137830e905SSolganik Alexander  *   Stats table output buffer.
27147830e905SSolganik Alexander  * @param n
27157830e905SSolganik Alexander  *   The size of the stats table.
27167830e905SSolganik Alexander  *
27177830e905SSolganik Alexander  * @return
27187830e905SSolganik Alexander  *   Number of xstats on success, negative on failure.
27197830e905SSolganik Alexander  */
27207830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
27217830e905SSolganik Alexander 			  struct rte_eth_xstat *xstats,
27227830e905SSolganik Alexander 			  unsigned int n)
27237830e905SSolganik Alexander {
2724890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
27257830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
27267830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
27277830e905SSolganik Alexander 	int stat_offset;
27287830e905SSolganik Alexander 	void *stats_begin;
27297830e905SSolganik Alexander 
27307830e905SSolganik Alexander 	if (n < xstats_count)
27317830e905SSolganik Alexander 		return xstats_count;
27327830e905SSolganik Alexander 
27337830e905SSolganik Alexander 	if (!xstats)
27347830e905SSolganik Alexander 		return 0;
27357830e905SSolganik Alexander 
27367830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
2737493107fdSMichal Krawczyk 		stat_offset = ena_stats_global_strings[stat].stat_offset;
27387830e905SSolganik Alexander 		stats_begin = &adapter->dev_stats;
27397830e905SSolganik Alexander 
27407830e905SSolganik Alexander 		xstats[count].id = count;
27417830e905SSolganik Alexander 		xstats[count].value = *((uint64_t *)
27427830e905SSolganik Alexander 			((char *)stats_begin + stat_offset));
27437830e905SSolganik Alexander 	}
27447830e905SSolganik Alexander 
274545718adaSMichal Krawczyk 	/* Even if the function below fails, we should copy the previous (or
274645718adaSMichal Krawczyk 	 * initial) values to keep the structure of rte_eth_xstat consistent.
274745718adaSMichal Krawczyk 	 */
274845718adaSMichal Krawczyk 	ena_copy_eni_stats(adapter);
274945718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
275045718adaSMichal Krawczyk 		stat_offset = ena_stats_eni_strings[stat].stat_offset;
275145718adaSMichal Krawczyk 		stats_begin = &adapter->eni_stats;
275245718adaSMichal Krawczyk 
275345718adaSMichal Krawczyk 		xstats[count].id = count;
275445718adaSMichal Krawczyk 		xstats[count].value = *((uint64_t *)
275545718adaSMichal Krawczyk 		    ((char *)stats_begin + stat_offset));
275645718adaSMichal Krawczyk 	}
275745718adaSMichal Krawczyk 
27587830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
27597830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
27607830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
27617830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
27627830e905SSolganik Alexander 
27637830e905SSolganik Alexander 			xstats[count].id = count;
27647830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
27657830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
27667830e905SSolganik Alexander 		}
27677830e905SSolganik Alexander 	}
27687830e905SSolganik Alexander 
27697830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
27707830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
27717830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
27727830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
27737830e905SSolganik Alexander 
27747830e905SSolganik Alexander 			xstats[count].id = count;
27757830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
27767830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
27777830e905SSolganik Alexander 		}
27787830e905SSolganik Alexander 	}
27797830e905SSolganik Alexander 
27807830e905SSolganik Alexander 	return count;
27817830e905SSolganik Alexander }
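
/*
 * ena_xstats_get_names() and ena_xstats_get() together implement the usual
 * two-call xstats pattern: the first call sizes the arrays, the second fills
 * them. A hedged application-side sketch (error handling omitted):
 *
 *	int i, nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(nb, sizeof(*vals));
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	nb = rte_eth_xstats_get(port_id, vals, nb);
 *	for (i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n", names[vals[i].id].name,
 *			vals[i].value);
 */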
27827830e905SSolganik Alexander 
27837830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
27847830e905SSolganik Alexander 				const uint64_t *ids,
27857830e905SSolganik Alexander 				uint64_t *values,
27867830e905SSolganik Alexander 				unsigned int n)
27877830e905SSolganik Alexander {
2788890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
27897830e905SSolganik Alexander 	uint64_t id;
27907830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
27917830e905SSolganik Alexander 	unsigned int i;
27927830e905SSolganik Alexander 	int qid;
27937830e905SSolganik Alexander 	int valid = 0;
279445718adaSMichal Krawczyk 	bool was_eni_copied = false;
279545718adaSMichal Krawczyk 
27967830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
27977830e905SSolganik Alexander 		id = ids[i];
27987830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
27997830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
28007830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
28017830e905SSolganik Alexander 			++valid;
28027830e905SSolganik Alexander 			continue;
28037830e905SSolganik Alexander 		}
28047830e905SSolganik Alexander 
280545718adaSMichal Krawczyk 		/* Check if id belongs to ENI statistics */
28067830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
280745718adaSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_ENI) {
280845718adaSMichal Krawczyk 			/* Avoid reading ENI stats multiple times in a single
280945718adaSMichal Krawczyk 			 * function call, as it requires communication with the
281045718adaSMichal Krawczyk 			 * admin queue.
281145718adaSMichal Krawczyk 			 */
281245718adaSMichal Krawczyk 			if (!was_eni_copied) {
281345718adaSMichal Krawczyk 				was_eni_copied = true;
281445718adaSMichal Krawczyk 				ena_copy_eni_stats(adapter);
281545718adaSMichal Krawczyk 			}
281645718adaSMichal Krawczyk 			values[i] = *((uint64_t *)&adapter->eni_stats + id);
281745718adaSMichal Krawczyk 			++valid;
281845718adaSMichal Krawczyk 			continue;
281945718adaSMichal Krawczyk 		}
282045718adaSMichal Krawczyk 
282145718adaSMichal Krawczyk 		/* Check if id belongs to rx queue statistics */
282245718adaSMichal Krawczyk 		id -= ENA_STATS_ARRAY_ENI;
28237830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
28247830e905SSolganik Alexander 		if (id < rx_entries) {
28257830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
28267830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
28277830e905SSolganik Alexander 			values[i] = *((uint64_t *)
28287830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
28297830e905SSolganik Alexander 			++valid;
28307830e905SSolganik Alexander 			continue;
28317830e905SSolganik Alexander 		}
28327830e905SSolganik Alexander 		/* Check if id belongs to tx queue statistics */
28337830e905SSolganik Alexander 		id -= rx_entries;
28347830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
28357830e905SSolganik Alexander 		if (id < tx_entries) {
28367830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
28377830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
28387830e905SSolganik Alexander 			values[i] = *((uint64_t *)
28397830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
28407830e905SSolganik Alexander 			++valid;
28417830e905SSolganik Alexander 			continue;
28427830e905SSolganik Alexander 		}
28437830e905SSolganik Alexander 	}
28447830e905SSolganik Alexander 
28457830e905SSolganik Alexander 	return valid;
28467830e905SSolganik Alexander }
28477830e905SSolganik Alexander 
28488a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key,
28498a7a73f2SMichal Krawczyk 				   const char *value,
28508a7a73f2SMichal Krawczyk 				   void *opaque)
28518a7a73f2SMichal Krawczyk {
28528a7a73f2SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
28538a7a73f2SMichal Krawczyk 	bool bool_value;
28548a7a73f2SMichal Krawczyk 
28558a7a73f2SMichal Krawczyk 	/* Parse the value. */
28568a7a73f2SMichal Krawczyk 	if (strcmp(value, "1") == 0) {
28578a7a73f2SMichal Krawczyk 		bool_value = true;
28588a7a73f2SMichal Krawczyk 	} else if (strcmp(value, "0") == 0) {
28598a7a73f2SMichal Krawczyk 		bool_value = false;
28608a7a73f2SMichal Krawczyk 	} else {
28618a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR,
28628a7a73f2SMichal Krawczyk 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
28638a7a73f2SMichal Krawczyk 			value, key);
28648a7a73f2SMichal Krawczyk 		return -EINVAL;
28658a7a73f2SMichal Krawczyk 	}
28668a7a73f2SMichal Krawczyk 
28678a7a73f2SMichal Krawczyk 	/* Now, assign it to the proper adapter field. */
28688a7a73f2SMichal Krawczyk 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
28698a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr = bool_value;
28708a7a73f2SMichal Krawczyk 
28718a7a73f2SMichal Krawczyk 	return 0;
28728a7a73f2SMichal Krawczyk }
28738a7a73f2SMichal Krawczyk 
28748a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter,
28758a7a73f2SMichal Krawczyk 			     struct rte_devargs *devargs)
28768a7a73f2SMichal Krawczyk {
28778a7a73f2SMichal Krawczyk 	static const char * const allowed_args[] = {
28788a7a73f2SMichal Krawczyk 		ENA_DEVARG_LARGE_LLQ_HDR,
28798a7a73f2SMichal Krawczyk 	};
28808a7a73f2SMichal Krawczyk 	struct rte_kvargs *kvlist;
28818a7a73f2SMichal Krawczyk 	int rc;
28828a7a73f2SMichal Krawczyk 
28838a7a73f2SMichal Krawczyk 	if (devargs == NULL)
28848a7a73f2SMichal Krawczyk 		return 0;
28858a7a73f2SMichal Krawczyk 
28868a7a73f2SMichal Krawczyk 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
28878a7a73f2SMichal Krawczyk 	if (kvlist == NULL) {
28888a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
28898a7a73f2SMichal Krawczyk 			devargs->args);
28908a7a73f2SMichal Krawczyk 		return -EINVAL;
28918a7a73f2SMichal Krawczyk 	}
28928a7a73f2SMichal Krawczyk 
28938a7a73f2SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
28948a7a73f2SMichal Krawczyk 		ena_process_bool_devarg, adapter);
28958a7a73f2SMichal Krawczyk 
28968a7a73f2SMichal Krawczyk 	rte_kvargs_free(kvlist);
28978a7a73f2SMichal Krawczyk 
28988a7a73f2SMichal Krawczyk 	return rc;
28998a7a73f2SMichal Krawczyk }
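
/*
 * The only devarg recognized here is the ENA_DEVARG_LARGE_LLQ_HDR key
 * ("large_llq_hdr" in ena_ethdev.h), taking "0" or "1". It is passed on the
 * EAL command line together with the device's PCI address, e.g. (the address
 * below is a placeholder):
 *
 *	dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i
 */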
29008a7a73f2SMichal Krawczyk 
2901ca148440SMichal Krawczyk /*********************************************************************
2902ca148440SMichal Krawczyk  *  PMD configuration
2903ca148440SMichal Krawczyk  *********************************************************************/
2904fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2905fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
2906fdf91e0fSJan Blunck {
2907fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
2908fdf91e0fSJan Blunck 		sizeof(struct ena_adapter), eth_ena_dev_init);
2909fdf91e0fSJan Blunck }
2910fdf91e0fSJan Blunck 
2911fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
2912fdf91e0fSJan Blunck {
2913eb0ef49dSMichal Krawczyk 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
2914fdf91e0fSJan Blunck }
2915fdf91e0fSJan Blunck 
2916fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = {
29171173fca2SJan Medala 	.id_table = pci_id_ena_map,
291805e0eee0SRafal Kozik 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
291905e0eee0SRafal Kozik 		     RTE_PCI_DRV_WC_ACTIVATE,
2920fdf91e0fSJan Blunck 	.probe = eth_ena_pci_probe,
2921fdf91e0fSJan Blunck 	.remove = eth_ena_pci_remove,
29221173fca2SJan Medala };
29231173fca2SJan Medala 
2924fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
292501f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
292606e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
29278a7a73f2SMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");
2928eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
2929eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
29306f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_RX
2931eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, NOTICE);
29326f1c9df9SStephen Hemminger #endif
29336f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX
2934eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, NOTICE);
29356f1c9df9SStephen Hemminger #endif
29366f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
2937eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx_free, tx_free, NOTICE);
29386f1c9df9SStephen Hemminger #endif
29396f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_COM_DEBUG
2940eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, NOTICE);
29416f1c9df9SStephen Hemminger #endif
29423adcba9aSMichal Krawczyk 
29433adcba9aSMichal Krawczyk /******************************************************************************
29443adcba9aSMichal Krawczyk  ******************************** AENQ Handlers *******************************
29453adcba9aSMichal Krawczyk  *****************************************************************************/
2946ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data,
2947ca148440SMichal Krawczyk 				      struct ena_admin_aenq_entry *aenq_e)
2948ca148440SMichal Krawczyk {
2949ca148440SMichal Krawczyk 	struct rte_eth_dev *eth_dev;
2950ca148440SMichal Krawczyk 	struct ena_adapter *adapter;
2951ca148440SMichal Krawczyk 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
2952ca148440SMichal Krawczyk 	uint32_t status;
2953ca148440SMichal Krawczyk 
2954890728ffSStephen Hemminger 	adapter = adapter_data;
2955ca148440SMichal Krawczyk 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
2956ca148440SMichal Krawczyk 	eth_dev = adapter->rte_dev;
2957ca148440SMichal Krawczyk 
2958ca148440SMichal Krawczyk 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
2959ca148440SMichal Krawczyk 	adapter->link_status = status;
2960ca148440SMichal Krawczyk 
2961ca148440SMichal Krawczyk 	ena_link_update(eth_dev, 0);
29625723fbedSFerruh Yigit 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2963ca148440SMichal Krawczyk }
2964ca148440SMichal Krawczyk 
2965f01f060cSRafal Kozik static void ena_notification(void *data,
2966f01f060cSRafal Kozik 			     struct ena_admin_aenq_entry *aenq_e)
2967f01f060cSRafal Kozik {
2968890728ffSStephen Hemminger 	struct ena_adapter *adapter = data;
2969f01f060cSRafal Kozik 	struct ena_admin_ena_hw_hints *hints;
2970f01f060cSRafal Kozik 
2971f01f060cSRafal Kozik 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
29726f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n",
2973f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.group,
2974f01f060cSRafal Kozik 			ENA_ADMIN_NOTIFICATION);
2975f01f060cSRafal Kozik 
2976*b19f366cSMichal Krawczyk 	switch (aenq_e->aenq_common_desc.syndrome) {
2977f01f060cSRafal Kozik 	case ENA_ADMIN_UPDATE_HINTS:
2978f01f060cSRafal Kozik 		hints = (struct ena_admin_ena_hw_hints *)
2979f01f060cSRafal Kozik 			(&aenq_e->inline_data_w4);
2980f01f060cSRafal Kozik 		ena_update_hints(adapter, hints);
2981f01f060cSRafal Kozik 		break;
2982f01f060cSRafal Kozik 	default:
29836f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome %d\n",
2984*b19f366cSMichal Krawczyk 			aenq_e->aenq_common_desc.syndrome);
2985f01f060cSRafal Kozik 	}
2986f01f060cSRafal Kozik }
2987f01f060cSRafal Kozik 
2988d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
2989d9b8b106SMichal Krawczyk 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
2990d9b8b106SMichal Krawczyk {
2991890728ffSStephen Hemminger 	struct ena_adapter *adapter = adapter_data;
299294c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
299394c3e376SRafal Kozik 	uint64_t rx_drops;
2994e1e73e32SMichal Krawczyk 	uint64_t tx_drops;
2995d9b8b106SMichal Krawczyk 
2996d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
299794c3e376SRafal Kozik 
299894c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
299994c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3000e1e73e32SMichal Krawczyk 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3001e1e73e32SMichal Krawczyk 
3002e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = rx_drops;
3003e1e73e32SMichal Krawczyk 	adapter->dev_stats.tx_drops = tx_drops;
3004d9b8b106SMichal Krawczyk }
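
/*
 * The drop counters arrive as split 32-bit halves and are merged as
 * ((uint64_t)high << 32) | low; e.g. rx_drops_high = 1 and rx_drops_low = 5
 * merge to 4294967301.
 */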
3005d9b8b106SMichal Krawczyk 
30063adcba9aSMichal Krawczyk /**
30073adcba9aSMichal Krawczyk  * This handler will be called for any unknown event group or unimplemented handler.
30083adcba9aSMichal Krawczyk  **/
30093adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
30103adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
30113adcba9aSMichal Krawczyk {
30126f1c9df9SStephen Hemminger 	PMD_DRV_LOG(ERR, "Unknown event was received, or an event with an "
3013983cce2dSRafal Kozik 			  "unimplemented handler\n");
30143adcba9aSMichal Krawczyk }
30153adcba9aSMichal Krawczyk 
3016ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
30173adcba9aSMichal Krawczyk 	.handlers = {
3018ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3019f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3020d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
30213adcba9aSMichal Krawczyk 	},
30223adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
30233adcba9aSMichal Krawczyk };
3024