/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	0
#define DRV_MODULE_VER_SUBMINOR	3

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)
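/*
 * Illustrative note (added): TX and RX I/O queues are interleaved in
 * the device qid space, so e.g. ethdev queue 2 uses device qid 4 for
 * TX and qid 5 for RX, and ENA_IO_RXQ_IDX_REV(5) == 2.
 */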

/* While processing submitted and completed descriptors (Rx and Tx path
 * respectively) in a loop, it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence the utilization ratio of 1/8 of the queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size)	((ring_size) / 8)
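/*
 * Example (added): for a ring of 1024 descriptors,
 * ENA_RING_DESCS_RATIO(1024) == 128, i.e. the submission and cleanup
 * loops process bursts of at most 1/8 of the ring at a time.
 */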

#define __MERGE_64B_H_L(h, l) (((uint64_t)(h) << 32) | (l))
#define TEST_BIT(val, bit_shift) ((val) & (1UL << (bit_shift)))
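/*
 * Illustrative usage (added), as in ena_stats_get() below:
 *   stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
 *                                     ena_stats.rx_pkts_low);
 * reassembles a 64-bit counter reported by the device as two 32-bit
 * halves.
 */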

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
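/*
 * Note (added): the upper four bits of the TCP data_off field carry
 * the data offset, i.e. the TCP header length in 32-bit words, which
 * is what the shift by 4 extracts here.
 */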

#define ENA_RX_RSS_TABLE_LOG_SIZE  7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST             = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
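/*
 * Usage example (assumption, not from the original source): device
 * arguments are passed together with the PCI device on the EAL command
 * line, e.g. "-w 00:06.0,large_llq_hdr=1".
 */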

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append it to the name.
 */
rte_atomic32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF	0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF	0xEC21

#define	ENA_TX_OFFLOAD_MASK	(\
	PKT_TX_L4_MASK |         \
	PKT_TX_IPV6 |            \
	PKT_TX_IPV4 |            \
	PKT_TX_IP_CKSUM |        \
	PKT_TX_TCP_SEG)

#define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
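/*
 * Note (added): mbufs requesting any offload from the NOTSUP mask are
 * expected to be rejected in the Tx prepare path (eth_ena_prep_pkts())
 * before reaching the device.
 */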

int ena_logtype_init;
int ena_logtype_driver;

#ifdef RTE_LIBRTE_ENA_DEBUG_RX
int ena_logtype_rx;
#endif
#ifdef RTE_LIBRTE_ENA_DEBUG_TX
int ena_logtype_tx;
#endif
#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
int ena_logtype_tx_free;
#endif
#ifdef RTE_LIBRTE_ENA_COM_DEBUG
int ena_logtype_com;
#endif

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			      enum ena_ring_type ring_type);
static int ena_queue_start(struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure        = ena_dev_configure,
	.dev_infos_get        = ena_infos_get,
	.rx_queue_setup       = ena_rx_queue_setup,
	.tx_queue_setup       = ena_tx_queue_setup,
	.dev_start            = ena_start,
	.dev_stop             = ena_stop,
	.link_update          = ena_link_update,
	.stats_get            = ena_stats_get,
	.xstats_get_names     = ena_xstats_get_names,
	.xstats_get	      = ena_xstats_get,
	.xstats_get_by_id     = ena_xstats_get_by_id,
	.mtu_set              = ena_mtu_set,
	.rx_queue_release     = ena_rx_queue_release,
	.tx_queue_release     = ena_tx_queue_release,
	.dev_close            = ena_close,
	.dev_reset            = ena_dev_reset,
	.reta_update          = ena_rss_reta_update,
	.reta_query           = ena_rss_reta_query,
};

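/*
 * Note (added): the RSS key is generated lazily from rte_rand() on the
 * first call and cached, so all ports within the process share the
 * same default hash key.
 */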
void ena_rss_key_fill(void *key, size_t size)
{
	static bool key_generated;
	static uint8_t default_key[ENA_HASH_KEY_SIZE];
	size_t i;

	RTE_ASSERT(size <= ENA_HASH_KEY_SIZE);

	if (!key_generated) {
		for (i = 0; i < ENA_HASH_KEY_SIZE; ++i)
			default_key[i] = rte_rand() & 0xff;
		key_generated = true;
	}

	rte_memcpy(key, default_key, size);
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
		packet_type |= RTE_PTYPE_L3_IPV4;
	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
		packet_type |= RTE_PTYPE_L3_IPV6;

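	/*
	 * Note (added): the device may skip L4 checksum validation, and
	 * IP fragments do not carry a complete L4 header, so only a
	 * checked, non-fragmented packet with a checksum error is
	 * reported as bad; everything else stays unknown.
	 */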
	if (!ena_rx_ctx->l4_csum_checked)
		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	else if (unlikely(ena_rx_ctx->l4_csum_err) && !ena_rx_ctx->frag)
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	else
		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;

	if (unlikely(ena_rx_ctx->l3_csum_err))
		ol_flags |= PKT_RX_IP_CKSUM_BAD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

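/*
 * Note (added): translates mbuf offload flags into the ena_com Tx
 * context (TSO, L3/L4 checksum, DF bit). With meta caching disabled
 * the metadata descriptor is zeroed and sent with every packet,
 * instead of only when its contents change.
 */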
static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_UDP_CKSUM) &&
				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

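/*
 * Note (added): an out-of-range or stale req_id means the driver and
 * device views of the ring have diverged; the validators below record
 * a reset reason and request a device reset instead of attempting to
 * recover in the datapath.
 */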
static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id);

	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	rx_ring->adapter->trigger_reset = true;
	++rx_ring->rx_stats.bad_req_id;

	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n");
	else
		PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}

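/*
 * Note (added): host info is an optional attribute reported to the
 * device for diagnostics; as there is no kernel driver here, the DPDK
 * version string is reused for the kernel_ver and os_dist fields.
 */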
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
{
	return ENA_STATS_ARRAY_GLOBAL +
		(dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->rte_dev);

	/* Allocate 32 bytes for each string and 64 bits for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static void ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     adapter);

	/*
	 * MAC is not allocated dynamically. Setting it to NULL prevents the
	 * resource from being released in rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device");

	return rc;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		PMD_DRV_LOG(WARNING,
			"indirection table size %d is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* Each reta_conf holds 64 entries; to support 128 entries,
		 * two 64-entry groups are used.
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       entry_value);
			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
				PMD_DRV_LOG(ERR,
					"Cannot fill indirect table\n");
				return rc;
			}
		}
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		return rc;
	}

	PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}

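/*
 * Note (added): the default RSS configuration below spreads the 128
 * indirection table entries round-robin across the configured Rx
 * queues and selects CRC32 hashing with an all-ones initial value.
 */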
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
			PMD_DRV_LOG(ERR, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];

		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_start(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "Failed to start queue %d of type %d",
					     i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
				  "max mtu: %d, min mtu: %d",
			     max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

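	/*
	 * Note (added, assumption about the rationale): a 256B LLQ entry
	 * doubles the per-descriptor LLQ memory footprint compared to the
	 * default 128B entry, which is why the maximum TX queue depth is
	 * halved below when large LLQ headers are forced.
	 */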
	if (use_large_llq_hdr) {
		if ((llq->entry_size_ctrl_supported &
		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    (ena_dev->tx_mem_queue_type ==
		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
			max_tx_queue_size /= 2;
			PMD_INIT_LOG(INFO,
				"Forcing large headers and decreasing maximum TX queue size to %d\n",
				max_tx_queue_size);
		} else {
			PMD_INIT_LOG(ERR,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;

	return 0;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	adapter->drv_stats->rx_drops = 0;
}

static int ena_stats_get(struct rte_eth_dev *dev,
			  struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);

	/* Driver related stats */
	stats->imissed = adapter->drv_stats->rx_drops;
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);

	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;

		stats->q_ibytes[i] = rx_stats->bytes;
		stats->q_ipackets[i] = rx_stats->cnt;
		stats->q_errors[i] = rx_stats->bad_desc_num +
			rx_stats->bad_req_id;
	}

	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;

		stats->q_obytes[i] = tx_stats->bytes;
		stats->q_opackets[i] = tx_stats->cnt;
	}

	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
	adapter = dev->data->dev_private;

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");

	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
		PMD_DRV_LOG(ERR,
			"Invalid MTU setting. new_mtu: %d "
			"max mtu: %d min mtu: %d\n",
			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
		return -EINVAL;
	}

	rc = ena_com_set_dev_mtu(ena_dev, mtu);
	if (rc)
		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
	else
		PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu);

	return rc;
}
10461173fca2SJan Medala 
10471173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev)
10481173fca2SJan Medala {
1049890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1050d9b8b106SMichal Krawczyk 	uint64_t ticks;
10511173fca2SJan Medala 	int rc = 0;
10521173fca2SJan Medala 
10531173fca2SJan Medala 	rc = ena_check_valid_conf(adapter);
10541173fca2SJan Medala 	if (rc)
10551173fca2SJan Medala 		return rc;
10561173fca2SJan Medala 
105726e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
10581173fca2SJan Medala 	if (rc)
10591173fca2SJan Medala 		return rc;
10601173fca2SJan Medala 
106126e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
10621173fca2SJan Medala 	if (rc)
106326e5543dSRafal Kozik 		goto err_start_tx;
10641173fca2SJan Medala 
10651173fca2SJan Medala 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
1066361913adSDaria Kolistratova 	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
10671173fca2SJan Medala 		rc = ena_rss_init_default(adapter);
10681173fca2SJan Medala 		if (rc)
106926e5543dSRafal Kozik 			goto err_rss_init;
10701173fca2SJan Medala 	}
10711173fca2SJan Medala 
10721173fca2SJan Medala 	ena_stats_restart(dev);
10731173fca2SJan Medala 
1074d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
1075d9b8b106SMichal Krawczyk 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1076d9b8b106SMichal Krawczyk 
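	/* Arm the watchdog timer to fire once per second on this lcore. */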
1077d9b8b106SMichal Krawczyk 	ticks = rte_get_timer_hz();
1078d9b8b106SMichal Krawczyk 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1079d9b8b106SMichal Krawczyk 			ena_timer_wd_callback, adapter);
1080d9b8b106SMichal Krawczyk 
10817830e905SSolganik Alexander 	++adapter->dev_stats.dev_start;
10821173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
10831173fca2SJan Medala 
10841173fca2SJan Medala 	return 0;
108526e5543dSRafal Kozik 
108626e5543dSRafal Kozik err_rss_init:
108726e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
108826e5543dSRafal Kozik err_start_tx:
108926e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
109026e5543dSRafal Kozik 	return rc;
10911173fca2SJan Medala }
10921173fca2SJan Medala 
1093eb0ef49dSMichal Krawczyk static void ena_stop(struct rte_eth_dev *dev)
1094eb0ef49dSMichal Krawczyk {
1095890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1096e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1097e457bc70SRafal Kozik 	int rc;
1098eb0ef49dSMichal Krawczyk 
1099d9b8b106SMichal Krawczyk 	rte_timer_stop_sync(&adapter->timer_wd);
110026e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
110126e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1102d9b8b106SMichal Krawczyk 
1103e457bc70SRafal Kozik 	if (adapter->trigger_reset) {
1104e457bc70SRafal Kozik 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1105e457bc70SRafal Kozik 		if (rc)
11066f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc);
1107e457bc70SRafal Kozik 	}
1108e457bc70SRafal Kozik 
11097830e905SSolganik Alexander 	++adapter->dev_stats.dev_stop;
1110eb0ef49dSMichal Krawczyk 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1111eb0ef49dSMichal Krawczyk }
1112eb0ef49dSMichal Krawczyk 
1113df238f84SMichal Krawczyk static int ena_create_io_queue(struct ena_ring *ring)
1114df238f84SMichal Krawczyk {
1115df238f84SMichal Krawczyk 	struct ena_adapter *adapter;
1116df238f84SMichal Krawczyk 	struct ena_com_dev *ena_dev;
1117df238f84SMichal Krawczyk 	struct ena_com_create_io_ctx ctx =
1118df238f84SMichal Krawczyk 		/* policy set to _HOST just to satisfy icc compiler */
1119df238f84SMichal Krawczyk 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1120df238f84SMichal Krawczyk 		  0, 0, 0, 0, 0 };
1121df238f84SMichal Krawczyk 	uint16_t ena_qid;
1122778677dcSRafal Kozik 	unsigned int i;
1123df238f84SMichal Krawczyk 	int rc;
1124df238f84SMichal Krawczyk 
1125df238f84SMichal Krawczyk 	adapter = ring->adapter;
1126df238f84SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1127df238f84SMichal Krawczyk 
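	/* TX and RX rings share the device qid space: a ring id q maps to
	 * TX qid 2*q and RX qid 2*q + 1 (e.g. ring 3 -> qids 6 and 7).
	 * The free request-id list is reset before the queue is created.
	 */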
1128df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX) {
1129df238f84SMichal Krawczyk 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1130df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1131df238f84SMichal Krawczyk 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1132778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1133778677dcSRafal Kozik 			ring->empty_tx_reqs[i] = i;
1134df238f84SMichal Krawczyk 	} else {
1135df238f84SMichal Krawczyk 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1136df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1137778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1138778677dcSRafal Kozik 			ring->empty_rx_reqs[i] = i;
1139df238f84SMichal Krawczyk 	}
1140badc3a6aSMichal Krawczyk 	ctx.queue_size = ring->ring_size;
1141df238f84SMichal Krawczyk 	ctx.qid = ena_qid;
1142df238f84SMichal Krawczyk 	ctx.msix_vector = -1; /* interrupts not used */
11434217cb0bSMichal Krawczyk 	ctx.numa_node = ring->numa_socket_id;
1144df238f84SMichal Krawczyk 
1145df238f84SMichal Krawczyk 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1146df238f84SMichal Krawczyk 	if (rc) {
11476f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1148df238f84SMichal Krawczyk 			"failed to create io queue #%d (qid:%d) rc: %d\n",
1149df238f84SMichal Krawczyk 			ring->id, ena_qid, rc);
1150df238f84SMichal Krawczyk 		return rc;
1151df238f84SMichal Krawczyk 	}
1152df238f84SMichal Krawczyk 
1153df238f84SMichal Krawczyk 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1154df238f84SMichal Krawczyk 				     &ring->ena_com_io_sq,
1155df238f84SMichal Krawczyk 				     &ring->ena_com_io_cq);
1156df238f84SMichal Krawczyk 	if (rc) {
11576f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1158df238f84SMichal Krawczyk 			"Failed to get io queue handlers. queue num %d rc: %d\n",
1159df238f84SMichal Krawczyk 			ring->id, rc);
1160df238f84SMichal Krawczyk 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1161df238f84SMichal Krawczyk 		return rc;
1162df238f84SMichal Krawczyk 	}
1163df238f84SMichal Krawczyk 
1164df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX)
1165df238f84SMichal Krawczyk 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1166df238f84SMichal Krawczyk 
1167df238f84SMichal Krawczyk 	return 0;
1168df238f84SMichal Krawczyk }
1169df238f84SMichal Krawczyk 
117026e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring)
1171df238f84SMichal Krawczyk {
117226e5543dSRafal Kozik 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1173df238f84SMichal Krawczyk 
117426e5543dSRafal Kozik 	if (ring->type == ENA_RING_TYPE_RX) {
117526e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
117626e5543dSRafal Kozik 		ena_rx_queue_release_bufs(ring);
117726e5543dSRafal Kozik 	} else {
117826e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
117926e5543dSRafal Kozik 		ena_tx_queue_release_bufs(ring);
1180df238f84SMichal Krawczyk 	}
1181df238f84SMichal Krawczyk }
1182df238f84SMichal Krawczyk 
118326e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev,
118426e5543dSRafal Kozik 			      enum ena_ring_type ring_type)
118526e5543dSRafal Kozik {
1186890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
118726e5543dSRafal Kozik 	struct ena_ring *queues = NULL;
118826e5543dSRafal Kozik 	uint16_t nb_queues, i;
118926e5543dSRafal Kozik 
119026e5543dSRafal Kozik 	if (ring_type == ENA_RING_TYPE_RX) {
119126e5543dSRafal Kozik 		queues = adapter->rx_ring;
119226e5543dSRafal Kozik 		nb_queues = dev->data->nb_rx_queues;
119326e5543dSRafal Kozik 	} else {
119426e5543dSRafal Kozik 		queues = adapter->tx_ring;
119526e5543dSRafal Kozik 		nb_queues = dev->data->nb_tx_queues;
119626e5543dSRafal Kozik 	}
119726e5543dSRafal Kozik 
119826e5543dSRafal Kozik 	for (i = 0; i < nb_queues; ++i)
119926e5543dSRafal Kozik 		if (queues[i].configured)
120026e5543dSRafal Kozik 			ena_queue_stop(&queues[i]);
120126e5543dSRafal Kozik }
120226e5543dSRafal Kozik 
120326e5543dSRafal Kozik static int ena_queue_start(struct ena_ring *ring)
12041173fca2SJan Medala {
1205a467e8f3SMichal Krawczyk 	int rc, bufs_num;
12061173fca2SJan Medala 
12071173fca2SJan Medala 	ena_assert_msg(ring->configured == 1,
120826e5543dSRafal Kozik 		       "Trying to start unconfigured queue\n");
12091173fca2SJan Medala 
1210df238f84SMichal Krawczyk 	rc = ena_create_io_queue(ring);
1211df238f84SMichal Krawczyk 	if (rc) {
1212498c687aSRafal Kozik 		PMD_INIT_LOG(ERR, "Failed to create IO queue!");
1213df238f84SMichal Krawczyk 		return rc;
1214df238f84SMichal Krawczyk 	}
1215df238f84SMichal Krawczyk 
12161173fca2SJan Medala 	ring->next_to_clean = 0;
12171173fca2SJan Medala 	ring->next_to_use = 0;
12181173fca2SJan Medala 
12197830e905SSolganik Alexander 	if (ring->type == ENA_RING_TYPE_TX) {
12207830e905SSolganik Alexander 		ring->tx_stats.available_desc =
1221b2b02edeSMichal Krawczyk 			ena_com_free_q_entries(ring->ena_com_io_sq);
12221173fca2SJan Medala 		return 0;
12237830e905SSolganik Alexander 	}
12241173fca2SJan Medala 
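	/* Populate all but one descriptor of the RX ring; leaving a single
	 * slot empty keeps a full ring distinguishable from an empty one.
	 */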
1225a467e8f3SMichal Krawczyk 	bufs_num = ring->ring_size - 1;
1226a467e8f3SMichal Krawczyk 	rc = ena_populate_rx_queue(ring, bufs_num);
1227a467e8f3SMichal Krawczyk 	if (rc != bufs_num) {
122826e5543dSRafal Kozik 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
122926e5543dSRafal Kozik 					 ENA_IO_RXQ_IDX(ring->id));
1230f2462150SFerruh Yigit 		PMD_INIT_LOG(ERR, "Failed to populate rx ring!");
1231241da076SRafal Kozik 		return ENA_COM_FAULT;
12321173fca2SJan Medala 	}
12331173fca2SJan Medala 
12341173fca2SJan Medala 	return 0;
12351173fca2SJan Medala }
12361173fca2SJan Medala 
12371173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev,
12381173fca2SJan Medala 			      uint16_t queue_idx,
12391173fca2SJan Medala 			      uint16_t nb_desc,
12404217cb0bSMichal Krawczyk 			      unsigned int socket_id,
124156b8b9b7SRafal Kozik 			      const struct rte_eth_txconf *tx_conf)
12421173fca2SJan Medala {
12431173fca2SJan Medala 	struct ena_ring *txq = NULL;
1244890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
12451173fca2SJan Medala 	unsigned int i;
12461173fca2SJan Medala 
12471173fca2SJan Medala 	txq = &adapter->tx_ring[queue_idx];
12481173fca2SJan Medala 
12491173fca2SJan Medala 	if (txq->configured) {
12506f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
12511173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
12521173fca2SJan Medala 			queue_idx);
1253241da076SRafal Kozik 		return ENA_COM_FAULT;
12541173fca2SJan Medala 	}
12551173fca2SJan Medala 
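	/* The ring size must be a power of 2 so that ring positions can
	 * wrap with a cheap (size - 1) mask instead of a modulo.
	 */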
12561daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
12576f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1258498c687aSRafal Kozik 			"Unsupported size of TX queue: %d is not a power of 2.\n",
12591daff526SJakub Palider 			nb_desc);
12601daff526SJakub Palider 		return -EINVAL;
12611daff526SJakub Palider 	}
12621daff526SJakub Palider 
12635920d930SMichal Krawczyk 	if (nb_desc > adapter->max_tx_ring_size) {
12646f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
12651173fca2SJan Medala 			"Unsupported size of TX queue (max size: %d)\n",
12665920d930SMichal Krawczyk 			adapter->max_tx_ring_size);
12671173fca2SJan Medala 		return -EINVAL;
12681173fca2SJan Medala 	}
12691173fca2SJan Medala 
1270ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
12715920d930SMichal Krawczyk 		nb_desc = adapter->max_tx_ring_size;
1272ea93d37eSRafal Kozik 
12731173fca2SJan Medala 	txq->port_id = dev->data->port_id;
12741173fca2SJan Medala 	txq->next_to_clean = 0;
12751173fca2SJan Medala 	txq->next_to_use = 0;
12761173fca2SJan Medala 	txq->ring_size = nb_desc;
12774217cb0bSMichal Krawczyk 	txq->numa_socket_id = socket_id;
12781173fca2SJan Medala 
12791173fca2SJan Medala 	txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
12801173fca2SJan Medala 					  sizeof(struct ena_tx_buffer) *
12811173fca2SJan Medala 					  txq->ring_size,
12821173fca2SJan Medala 					  RTE_CACHE_LINE_SIZE);
12831173fca2SJan Medala 	if (!txq->tx_buffer_info) {
12846f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n");
1285df238f84SMichal Krawczyk 		return -ENOMEM;
12861173fca2SJan Medala 	}
12871173fca2SJan Medala 
12881173fca2SJan Medala 	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
12891173fca2SJan Medala 					 sizeof(u16) * txq->ring_size,
12901173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
12911173fca2SJan Medala 	if (!txq->empty_tx_reqs) {
12926f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n");
1293df238f84SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
1294df238f84SMichal Krawczyk 		return -ENOMEM;
12951173fca2SJan Medala 	}
1296241da076SRafal Kozik 
12972fca2a98SMichal Krawczyk 	txq->push_buf_intermediate_buf =
12982fca2a98SMichal Krawczyk 		rte_zmalloc("txq->push_buf_intermediate_buf",
12992fca2a98SMichal Krawczyk 			    txq->tx_max_header_size,
13002fca2a98SMichal Krawczyk 			    RTE_CACHE_LINE_SIZE);
13012fca2a98SMichal Krawczyk 	if (!txq->push_buf_intermediate_buf) {
13026f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n");
13032fca2a98SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
13042fca2a98SMichal Krawczyk 		rte_free(txq->empty_tx_reqs);
13052fca2a98SMichal Krawczyk 		return -ENOMEM;
13062fca2a98SMichal Krawczyk 	}
13072fca2a98SMichal Krawczyk 
13081173fca2SJan Medala 	for (i = 0; i < txq->ring_size; i++)
13091173fca2SJan Medala 		txq->empty_tx_reqs[i] = i;
13101173fca2SJan Medala 
13112081d5e2SMichal Krawczyk 	if (tx_conf != NULL) {
13122081d5e2SMichal Krawczyk 		txq->offloads =
13132081d5e2SMichal Krawczyk 			tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
13142081d5e2SMichal Krawczyk 	}
13151173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
13161173fca2SJan Medala 	txq->configured = 1;
13171173fca2SJan Medala 	dev->data->tx_queues[queue_idx] = txq;
1318241da076SRafal Kozik 
1319241da076SRafal Kozik 	return 0;
13201173fca2SJan Medala }
13211173fca2SJan Medala 
13221173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev,
13231173fca2SJan Medala 			      uint16_t queue_idx,
13241173fca2SJan Medala 			      uint16_t nb_desc,
13254217cb0bSMichal Krawczyk 			      unsigned int socket_id,
1326a4996bd8SWei Dai 			      __rte_unused const struct rte_eth_rxconf *rx_conf,
13271173fca2SJan Medala 			      struct rte_mempool *mp)
13281173fca2SJan Medala {
1329890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
13301173fca2SJan Medala 	struct ena_ring *rxq = NULL;
133138364c26SMichal Krawczyk 	size_t buffer_size;
1332df238f84SMichal Krawczyk 	int i;
13331173fca2SJan Medala 
13341173fca2SJan Medala 	rxq = &adapter->rx_ring[queue_idx];
13351173fca2SJan Medala 	if (rxq->configured) {
13366f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
13371173fca2SJan Medala 			"API violation. Queue %d is already configured\n",
13381173fca2SJan Medala 			queue_idx);
1339241da076SRafal Kozik 		return ENA_COM_FAULT;
13401173fca2SJan Medala 	}
13411173fca2SJan Medala 
1342ea93d37eSRafal Kozik 	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
13435920d930SMichal Krawczyk 		nb_desc = adapter->max_rx_ring_size;
1344ea93d37eSRafal Kozik 
13451daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
13466f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1347498c687aSRafal Kozik 			"Unsupported size of RX queue: %d is not a power of 2.\n",
13481daff526SJakub Palider 			nb_desc);
13491daff526SJakub Palider 		return -EINVAL;
13501daff526SJakub Palider 	}
13511daff526SJakub Palider 
13525920d930SMichal Krawczyk 	if (nb_desc > adapter->max_rx_ring_size) {
13536f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
13541173fca2SJan Medala 			"Unsupported size of RX queue (max size: %d)\n",
13555920d930SMichal Krawczyk 			adapter->max_rx_ring_size);
13561173fca2SJan Medala 		return -EINVAL;
13571173fca2SJan Medala 	}
13581173fca2SJan Medala 
135938364c26SMichal Krawczyk 	/* ENA doesn't support buffers smaller than ENA_RX_BUF_MIN_SIZE (1400 bytes) */
136038364c26SMichal Krawczyk 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
136138364c26SMichal Krawczyk 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
136238364c26SMichal Krawczyk 		PMD_DRV_LOG(ERR,
136338364c26SMichal Krawczyk 			"Unsupported size of RX buffer: %zu (min size: %d)\n",
136438364c26SMichal Krawczyk 			buffer_size, ENA_RX_BUF_MIN_SIZE);
136538364c26SMichal Krawczyk 		return -EINVAL;
136638364c26SMichal Krawczyk 	}
136738364c26SMichal Krawczyk 
13681173fca2SJan Medala 	rxq->port_id = dev->data->port_id;
13691173fca2SJan Medala 	rxq->next_to_clean = 0;
13701173fca2SJan Medala 	rxq->next_to_use = 0;
13711173fca2SJan Medala 	rxq->ring_size = nb_desc;
13724217cb0bSMichal Krawczyk 	rxq->numa_socket_id = socket_id;
13731173fca2SJan Medala 	rxq->mb_pool = mp;
13741173fca2SJan Medala 
13751173fca2SJan Medala 	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
1376*1be097dcSMichal Krawczyk 		sizeof(struct ena_rx_buffer) * nb_desc,
13771173fca2SJan Medala 		RTE_CACHE_LINE_SIZE);
13781173fca2SJan Medala 	if (!rxq->rx_buffer_info) {
13796f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
13801173fca2SJan Medala 		return -ENOMEM;
13811173fca2SJan Medala 	}
13821173fca2SJan Medala 
138379405ee1SRafal Kozik 	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
138479405ee1SRafal Kozik 					    sizeof(struct rte_mbuf *) * nb_desc,
138579405ee1SRafal Kozik 					    RTE_CACHE_LINE_SIZE);
138679405ee1SRafal Kozik 
138779405ee1SRafal Kozik 	if (!rxq->rx_refill_buffer) {
13886f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n");
138979405ee1SRafal Kozik 		rte_free(rxq->rx_buffer_info);
139079405ee1SRafal Kozik 		rxq->rx_buffer_info = NULL;
139179405ee1SRafal Kozik 		return -ENOMEM;
139279405ee1SRafal Kozik 	}
139379405ee1SRafal Kozik 
1394c2034976SMichal Krawczyk 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
1395c2034976SMichal Krawczyk 					 sizeof(uint16_t) * nb_desc,
1396c2034976SMichal Krawczyk 					 RTE_CACHE_LINE_SIZE);
1397c2034976SMichal Krawczyk 	if (!rxq->empty_rx_reqs) {
13986f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n");
1399c2034976SMichal Krawczyk 		rte_free(rxq->rx_buffer_info);
1400c2034976SMichal Krawczyk 		rxq->rx_buffer_info = NULL;
140179405ee1SRafal Kozik 		rte_free(rxq->rx_refill_buffer);
140279405ee1SRafal Kozik 		rxq->rx_refill_buffer = NULL;
1403c2034976SMichal Krawczyk 		return -ENOMEM;
1404c2034976SMichal Krawczyk 	}
1405c2034976SMichal Krawczyk 
1406c2034976SMichal Krawczyk 	for (i = 0; i < nb_desc; i++)
1407eccbe2ffSRafal Kozik 		rxq->empty_rx_reqs[i] = i;
1408c2034976SMichal Krawczyk 
14091173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
14101173fca2SJan Medala 	rxq->configured = 1;
14111173fca2SJan Medala 	dev->data->rx_queues[queue_idx] = rxq;
14121173fca2SJan Medala 
1413df238f84SMichal Krawczyk 	return 0;
14141173fca2SJan Medala }
14151173fca2SJan Medala 
14161173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
14171173fca2SJan Medala {
14181173fca2SJan Medala 	unsigned int i;
14191173fca2SJan Medala 	int rc;
14201daff526SJakub Palider 	uint16_t ring_size = rxq->ring_size;
14211daff526SJakub Palider 	uint16_t ring_mask = ring_size - 1;
14221daff526SJakub Palider 	uint16_t next_to_use = rxq->next_to_use;
1423c2034976SMichal Krawczyk 	uint16_t in_use, req_id;
142479405ee1SRafal Kozik 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
14251173fca2SJan Medala 
14261173fca2SJan Medala 	if (unlikely(!count))
14271173fca2SJan Medala 		return 0;
14281173fca2SJan Medala 
14291daff526SJakub Palider 	in_use = rxq->next_to_use - rxq->next_to_clean;
1430498c687aSRafal Kozik 	ena_assert_msg(((in_use + count) < ring_size), "bad ring state\n");
14311173fca2SJan Medala 
14321173fca2SJan Medala 	/* get resources for incoming packets */
143379405ee1SRafal Kozik 	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
14341173fca2SJan Medala 	if (unlikely(rc < 0)) {
14351173fca2SJan Medala 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
14367830e905SSolganik Alexander 		++rxq->rx_stats.mbuf_alloc_fail;
14371173fca2SJan Medala 		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
14381173fca2SJan Medala 		return 0;
14391173fca2SJan Medala 	}
14401173fca2SJan Medala 
14411173fca2SJan Medala 	for (i = 0; i < count; i++) {
14421daff526SJakub Palider 		uint16_t next_to_use_masked = next_to_use & ring_mask;
144379405ee1SRafal Kozik 		struct rte_mbuf *mbuf = mbufs[i];
14441173fca2SJan Medala 		struct ena_com_buf ebuf;
1445*1be097dcSMichal Krawczyk 		struct ena_rx_buffer *rx_info;
14461173fca2SJan Medala 
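		/* Prefetch a few entries ahead to hide the mbuf header
		 * cache misses of the upcoming iterations.
		 */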
144779405ee1SRafal Kozik 		if (likely((i + 4) < count))
144879405ee1SRafal Kozik 			rte_prefetch0(mbufs[i + 4]);
1449c2034976SMichal Krawczyk 
1450c2034976SMichal Krawczyk 		req_id = rxq->empty_rx_reqs[next_to_use_masked];
1451241da076SRafal Kozik 		rc = validate_rx_req_id(rxq, req_id);
1452*1be097dcSMichal Krawczyk 		if (unlikely(rc))
1453241da076SRafal Kozik 			break;
1454*1be097dcSMichal Krawczyk 
1455*1be097dcSMichal Krawczyk 		rx_info = &rxq->rx_buffer_info[req_id];
1456241da076SRafal Kozik 
14571173fca2SJan Medala 		/* prepare physical address for DMA transaction */
1458455da545SSantosh Shukla 		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
14591173fca2SJan Medala 		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
14601173fca2SJan Medala 		/* pass resource to device */
14611173fca2SJan Medala 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
1462c2034976SMichal Krawczyk 						&ebuf, req_id);
14631173fca2SJan Medala 		if (unlikely(rc)) {
14646f1c9df9SStephen Hemminger 			PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
14651173fca2SJan Medala 			break;
14661173fca2SJan Medala 		}
1467*1be097dcSMichal Krawczyk 		rx_info->mbuf = mbuf;
14681daff526SJakub Palider 		next_to_use++;
14691173fca2SJan Medala 	}
14701173fca2SJan Medala 
147179405ee1SRafal Kozik 	if (unlikely(i < count)) {
14726f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d "
1473241da076SRafal Kozik 			"buffers (out of %d requested)\n", rxq->id, i, count);
147479405ee1SRafal Kozik 		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
147579405ee1SRafal Kozik 				     count - i);
14767830e905SSolganik Alexander 		++rxq->rx_stats.refill_partial;
147779405ee1SRafal Kozik 	}
1478241da076SRafal Kozik 
14795e02e19eSJan Medala 	/* When we submitted free resources to the device... */
14803d19e1abSRafal Kozik 	if (likely(i > 0)) {
148138faa87eSMichal Krawczyk 		/* ...let HW know that it can fill buffers with data. */
14821173fca2SJan Medala 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
14831173fca2SJan Medala 
14845e02e19eSJan Medala 		rxq->next_to_use = next_to_use;
14855e02e19eSJan Medala 	}
14865e02e19eSJan Medala 
14871173fca2SJan Medala 	return i;
14881173fca2SJan Medala }
14891173fca2SJan Medala 
14901173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev,
1491e859d2b8SRafal Kozik 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
1492e859d2b8SRafal Kozik 			   bool *wd_state)
14931173fca2SJan Medala {
1494ca148440SMichal Krawczyk 	uint32_t aenq_groups;
14951173fca2SJan Medala 	int rc;
1496c4144557SJan Medala 	bool readless_supported;
14971173fca2SJan Medala 
14981173fca2SJan Medala 	/* Initialize mmio registers */
14991173fca2SJan Medala 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
15001173fca2SJan Medala 	if (rc) {
15016f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to init mmio read-less mode\n");
15021173fca2SJan Medala 		return rc;
15031173fca2SJan Medala 	}
15041173fca2SJan Medala 
1505c4144557SJan Medala 	/* The PCIe configuration space revision ID indicates whether mmio
1506c4144557SJan Medala 	 * register read is disabled.
1507c4144557SJan Medala 	 */
1508c4144557SJan Medala 	readless_supported =
1509c4144557SJan Medala 		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
1510c4144557SJan Medala 			       & ENA_MMIO_DISABLE_REG_READ);
1511c4144557SJan Medala 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1512c4144557SJan Medala 
15131173fca2SJan Medala 	/* reset device */
15143adcba9aSMichal Krawczyk 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
15151173fca2SJan Medala 	if (rc) {
15166f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "cannot reset device\n");
15171173fca2SJan Medala 		goto err_mmio_read_less;
15181173fca2SJan Medala 	}
15191173fca2SJan Medala 
15201173fca2SJan Medala 	/* check FW version */
15211173fca2SJan Medala 	rc = ena_com_validate_version(ena_dev);
15221173fca2SJan Medala 	if (rc) {
15236f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "device version is too low\n");
15241173fca2SJan Medala 		goto err_mmio_read_less;
15251173fca2SJan Medala 	}
15261173fca2SJan Medala 
15271173fca2SJan Medala 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
15281173fca2SJan Medala 
15291173fca2SJan Medala 	/* ENA device administration layer init */
1530b68309beSRafal Kozik 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
15311173fca2SJan Medala 	if (rc) {
15326f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15331173fca2SJan Medala 			"cannot initialize ena admin queue\n");
15341173fca2SJan Medala 		goto err_mmio_read_less;
15351173fca2SJan Medala 	}
15361173fca2SJan Medala 
15371173fca2SJan Medala 	/* To enable the msix interrupts the driver needs to know the number
15381173fca2SJan Medala 	 * of queues. So the driver uses polling mode to retrieve this
15391173fca2SJan Medala 	 * information.
15401173fca2SJan Medala 	 */
15411173fca2SJan Medala 	ena_com_set_admin_polling_mode(ena_dev, true);
15421173fca2SJan Medala 
1543201ff2e5SJakub Palider 	ena_config_host_info(ena_dev);
1544201ff2e5SJakub Palider 
15451173fca2SJan Medala 	/* Get Device Attributes and features */
15461173fca2SJan Medala 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
15471173fca2SJan Medala 	if (rc) {
15486f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
15491173fca2SJan Medala 			"cannot get attribute for ena device rc= %d\n", rc);
15501173fca2SJan Medala 		goto err_admin_init;
15511173fca2SJan Medala 	}
15521173fca2SJan Medala 
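	/* Subscribe only to the async event groups this PMD handles;
	 * the set is narrowed below to what the device supports.
	 */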
1553f01f060cSRafal Kozik 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1554d9b8b106SMichal Krawczyk 		      BIT(ENA_ADMIN_NOTIFICATION) |
1555983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1556983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1557983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_WARNING);
1558ca148440SMichal Krawczyk 
1559ca148440SMichal Krawczyk 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1560ca148440SMichal Krawczyk 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
1561ca148440SMichal Krawczyk 	if (rc) {
15626f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc);
1563ca148440SMichal Krawczyk 		goto err_admin_init;
1564ca148440SMichal Krawczyk 	}
1565ca148440SMichal Krawczyk 
1566e859d2b8SRafal Kozik 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
1567e859d2b8SRafal Kozik 
15681173fca2SJan Medala 	return 0;
15691173fca2SJan Medala 
15701173fca2SJan Medala err_admin_init:
15711173fca2SJan Medala 	ena_com_admin_destroy(ena_dev);
15721173fca2SJan Medala 
15731173fca2SJan Medala err_mmio_read_less:
15741173fca2SJan Medala 	ena_com_mmio_reg_read_request_destroy(ena_dev);
15751173fca2SJan Medala 
15761173fca2SJan Medala 	return rc;
15771173fca2SJan Medala }
15781173fca2SJan Medala 
1579ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg)
158015773e06SMichal Krawczyk {
1581890728ffSStephen Hemminger 	struct ena_adapter *adapter = cb_arg;
158215773e06SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
158315773e06SMichal Krawczyk 
158415773e06SMichal Krawczyk 	ena_com_admin_q_comp_intr_handler(ena_dev);
15853d19e1abSRafal Kozik 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1586ca148440SMichal Krawczyk 		ena_com_aenq_intr_handler(ena_dev, adapter);
158715773e06SMichal Krawczyk }
158815773e06SMichal Krawczyk 
15895efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter)
15905efb9fc7SMichal Krawczyk {
1591e859d2b8SRafal Kozik 	if (!adapter->wd_state)
1592e859d2b8SRafal Kozik 		return;
1593e859d2b8SRafal Kozik 
15945efb9fc7SMichal Krawczyk 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
15955efb9fc7SMichal Krawczyk 		return;
15965efb9fc7SMichal Krawczyk 
15975efb9fc7SMichal Krawczyk 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
15985efb9fc7SMichal Krawczyk 	    adapter->keep_alive_timeout)) {
15996f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
16005efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
16015efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16027830e905SSolganik Alexander 		++adapter->dev_stats.wd_expired;
16035efb9fc7SMichal Krawczyk 	}
16045efb9fc7SMichal Krawczyk }
16055efb9fc7SMichal Krawczyk 
16065efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */
16075efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter)
16085efb9fc7SMichal Krawczyk {
16095efb9fc7SMichal Krawczyk 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
16106f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n");
16115efb9fc7SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
16125efb9fc7SMichal Krawczyk 		adapter->trigger_reset = true;
16135efb9fc7SMichal Krawczyk 	}
16145efb9fc7SMichal Krawczyk }
16155efb9fc7SMichal Krawczyk 
1616d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1617d9b8b106SMichal Krawczyk 				  void *arg)
1618d9b8b106SMichal Krawczyk {
1619890728ffSStephen Hemminger 	struct ena_adapter *adapter = arg;
1620d9b8b106SMichal Krawczyk 	struct rte_eth_dev *dev = adapter->rte_dev;
1621d9b8b106SMichal Krawczyk 
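	/* Watchdog tick: look for missed keep-alive events and for a hung
	 * admin queue; either condition requests a device reset.
	 */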
16225efb9fc7SMichal Krawczyk 	check_for_missing_keep_alive(adapter);
16235efb9fc7SMichal Krawczyk 	check_for_admin_com_state(adapter);
1624d9b8b106SMichal Krawczyk 
16255efb9fc7SMichal Krawczyk 	if (unlikely(adapter->trigger_reset)) {
16266f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
1627d9b8b106SMichal Krawczyk 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1628d9b8b106SMichal Krawczyk 			NULL);
1629d9b8b106SMichal Krawczyk 	}
1630d9b8b106SMichal Krawczyk }
1631d9b8b106SMichal Krawczyk 
16322fca2a98SMichal Krawczyk static inline void
16338a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config,
16348a7a73f2SMichal Krawczyk 			       struct ena_admin_feature_llq_desc *llq,
16358a7a73f2SMichal Krawczyk 			       bool use_large_llq_hdr)
16362fca2a98SMichal Krawczyk {
16372fca2a98SMichal Krawczyk 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
16382fca2a98SMichal Krawczyk 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
16392fca2a98SMichal Krawczyk 	llq_config->llq_num_decs_before_header =
16402fca2a98SMichal Krawczyk 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
16418a7a73f2SMichal Krawczyk 
16428a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr &&
16438a7a73f2SMichal Krawczyk 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
16448a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16458a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
16468a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 256;
16478a7a73f2SMichal Krawczyk 	} else {
16488a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
16498a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
16502fca2a98SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 128;
16512fca2a98SMichal Krawczyk 	}
16528a7a73f2SMichal Krawczyk }
16532fca2a98SMichal Krawczyk 
16542fca2a98SMichal Krawczyk static int
16552fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter,
16562fca2a98SMichal Krawczyk 				struct ena_com_dev *ena_dev,
16572fca2a98SMichal Krawczyk 				struct ena_admin_feature_llq_desc *llq,
16582fca2a98SMichal Krawczyk 				struct ena_llq_configurations *llq_default_configurations)
16592fca2a98SMichal Krawczyk {
16602fca2a98SMichal Krawczyk 	int rc;
16612fca2a98SMichal Krawczyk 	u32 llq_feature_mask;
16622fca2a98SMichal Krawczyk 
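	/* LLQ (low-latency queue) mode pushes TX descriptors and packet
	 * headers directly to device memory over the dedicated BAR.
	 */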
16632fca2a98SMichal Krawczyk 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
16642fca2a98SMichal Krawczyk 	if (!(ena_dev->supported_features & llq_feature_mask)) {
16656f1c9df9SStephen Hemminger 		PMD_DRV_LOG(INFO,
16662fca2a98SMichal Krawczyk 			"LLQ is not supported. Fallback to host mode policy.\n");
16672fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16682fca2a98SMichal Krawczyk 		return 0;
16692fca2a98SMichal Krawczyk 	}
16702fca2a98SMichal Krawczyk 
16712fca2a98SMichal Krawczyk 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
16722fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
16732fca2a98SMichal Krawczyk 		PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
1674498c687aSRafal Kozik 			"Fallback to host mode policy.");
16752fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16762fca2a98SMichal Krawczyk 		return 0;
16772fca2a98SMichal Krawczyk 	}
16782fca2a98SMichal Krawczyk 
16792fca2a98SMichal Krawczyk 	/* Nothing to config, exit */
16802fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
16812fca2a98SMichal Krawczyk 		return 0;
16822fca2a98SMichal Krawczyk 
16832fca2a98SMichal Krawczyk 	if (!adapter->dev_mem_base) {
16846f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Unable to access LLQ BAR resource. "
16852fca2a98SMichal Krawczyk 			"Fallback to host mode policy.\n");
16862fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16872fca2a98SMichal Krawczyk 		return 0;
16882fca2a98SMichal Krawczyk 	}
16892fca2a98SMichal Krawczyk 
16902fca2a98SMichal Krawczyk 	ena_dev->mem_bar = adapter->dev_mem_base;
16912fca2a98SMichal Krawczyk 
16922fca2a98SMichal Krawczyk 	return 0;
16932fca2a98SMichal Krawczyk }
16942fca2a98SMichal Krawczyk 
16955920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
169601bd6877SRafal Kozik 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
169701bd6877SRafal Kozik {
16985920d930SMichal Krawczyk 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
169901bd6877SRafal Kozik 
1700ea93d37eSRafal Kozik 	/* Regular queue capabilities */
1701ea93d37eSRafal Kozik 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1702ea93d37eSRafal Kozik 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1703ea93d37eSRafal Kozik 			&get_feat_ctx->max_queue_ext.max_queue_ext;
17042fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
17052fca2a98SMichal Krawczyk 				    max_queue_ext->max_rx_cq_num);
17062fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
17072fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
1708ea93d37eSRafal Kozik 	} else {
1709ea93d37eSRafal Kozik 		struct ena_admin_queue_feature_desc *max_queues =
1710ea93d37eSRafal Kozik 			&get_feat_ctx->max_queues;
17112fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queues->max_sq_num;
17122fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queues->max_cq_num;
17132fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
1714ea93d37eSRafal Kozik 	}
171501bd6877SRafal Kozik 
17162fca2a98SMichal Krawczyk 	/* In case of LLQ use the llq number in the get feature cmd */
17172fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
17182fca2a98SMichal Krawczyk 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
17192fca2a98SMichal Krawczyk 
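	/* The usable queue count is the smallest of the driver limit and
	 * the device RX, TX SQ and TX CQ limits.
	 */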
17205920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
17215920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
17225920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
172301bd6877SRafal Kozik 
17245920d930SMichal Krawczyk 	if (unlikely(max_num_io_queues == 0)) {
17256f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
172701bd6877SRafal Kozik 		return 0; /* not a negative errno: the return type is unsigned */
172701bd6877SRafal Kozik 	}
172801bd6877SRafal Kozik 
17295920d930SMichal Krawczyk 	return max_num_io_queues;
173001bd6877SRafal Kozik }
173101bd6877SRafal Kozik 
17321173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
17331173fca2SJan Medala {
1734ea93d37eSRafal Kozik 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
17351173fca2SJan Medala 	struct rte_pci_device *pci_dev;
1736eb0ef49dSMichal Krawczyk 	struct rte_intr_handle *intr_handle;
1737890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
17381173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
17391173fca2SJan Medala 	struct ena_com_dev_get_features_ctx get_feat_ctx;
17402fca2a98SMichal Krawczyk 	struct ena_llq_configurations llq_config;
17412fca2a98SMichal Krawczyk 	const char *queue_type_str;
17425920d930SMichal Krawczyk 	uint32_t max_num_io_queues;
1743ea93d37eSRafal Kozik 	int rc;
17441173fca2SJan Medala 	static int adapters_found;
174533dde075SMichal Krawczyk 	bool disable_meta_caching;
1746e859d2b8SRafal Kozik 	bool wd_state;
17471173fca2SJan Medala 
17481173fca2SJan Medala 	eth_dev->dev_ops = &ena_dev_ops;
17491173fca2SJan Medala 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
17501173fca2SJan Medala 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
1751b3fc5a1aSKonstantin Ananyev 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
17521173fca2SJan Medala 
17531173fca2SJan Medala 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
17541173fca2SJan Medala 		return 0;
17551173fca2SJan Medala 
1756fd976890SMichal Krawczyk 	memset(adapter, 0, sizeof(struct ena_adapter));
1757fd976890SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
1758fd976890SMichal Krawczyk 
1759fd976890SMichal Krawczyk 	adapter->rte_eth_dev_data = eth_dev->data;
1760fd976890SMichal Krawczyk 	adapter->rte_dev = eth_dev;
1761fd976890SMichal Krawczyk 
1762c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
17631173fca2SJan Medala 	adapter->pdev = pci_dev;
17641173fca2SJan Medala 
1765f2462150SFerruh Yigit 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
17661173fca2SJan Medala 		     pci_dev->addr.domain,
17671173fca2SJan Medala 		     pci_dev->addr.bus,
17681173fca2SJan Medala 		     pci_dev->addr.devid,
17691173fca2SJan Medala 		     pci_dev->addr.function);
17701173fca2SJan Medala 
1771eb0ef49dSMichal Krawczyk 	intr_handle = &pci_dev->intr_handle;
1772eb0ef49dSMichal Krawczyk 
17731173fca2SJan Medala 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
17741173fca2SJan Medala 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
17751173fca2SJan Medala 
17761d339597SRafal Kozik 	if (!adapter->regs) {
1777f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
17781173fca2SJan Medala 			     ENA_REGS_BAR);
17791d339597SRafal Kozik 		return -ENXIO;
17801d339597SRafal Kozik 	}
17811173fca2SJan Medala 
17821173fca2SJan Medala 	ena_dev->reg_bar = adapter->regs;
17831173fca2SJan Medala 	ena_dev->dmadev = adapter->pdev;
17841173fca2SJan Medala 
17851173fca2SJan Medala 	adapter->id_number = adapters_found;
17861173fca2SJan Medala 
17871173fca2SJan Medala 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
17881173fca2SJan Medala 		 adapter->id_number);
17891173fca2SJan Medala 
17908a7a73f2SMichal Krawczyk 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
17918a7a73f2SMichal Krawczyk 	if (rc != 0) {
17928a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to parse devargs");
17938a7a73f2SMichal Krawczyk 		goto err;
17948a7a73f2SMichal Krawczyk 	}
17958a7a73f2SMichal Krawczyk 
17961173fca2SJan Medala 	/* device specific initialization routine */
1797e859d2b8SRafal Kozik 	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
17981173fca2SJan Medala 	if (rc) {
1799f2462150SFerruh Yigit 		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
1800241da076SRafal Kozik 		goto err;
18011173fca2SJan Medala 	}
1802e859d2b8SRafal Kozik 	adapter->wd_state = wd_state;
18031173fca2SJan Medala 
18048a7a73f2SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
18058a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18062fca2a98SMichal Krawczyk 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
18072fca2a98SMichal Krawczyk 					     &get_feat_ctx.llq, &llq_config);
18082fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
18092fca2a98SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to set placement policy");
18102fca2a98SMichal Krawczyk 		goto err_device_destroy;
18112fca2a98SMichal Krawczyk 	}
18122fca2a98SMichal Krawczyk 
18132fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
18142fca2a98SMichal Krawczyk 		queue_type_str = "Regular";
18152fca2a98SMichal Krawczyk 	else
18162fca2a98SMichal Krawczyk 		queue_type_str = "Low latency";
18176f1c9df9SStephen Hemminger 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
1818ea93d37eSRafal Kozik 
1819ea93d37eSRafal Kozik 	calc_queue_ctx.ena_dev = ena_dev;
1820ea93d37eSRafal Kozik 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
18211173fca2SJan Medala 
18225920d930SMichal Krawczyk 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
18238a7a73f2SMichal Krawczyk 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
18248a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
18255920d930SMichal Krawczyk 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
1826241da076SRafal Kozik 		rc = -EFAULT;
1827241da076SRafal Kozik 		goto err_device_destroy;
1828241da076SRafal Kozik 	}
18291173fca2SJan Medala 
18305920d930SMichal Krawczyk 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
18315920d930SMichal Krawczyk 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
1832ea93d37eSRafal Kozik 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
1833ea93d37eSRafal Kozik 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
18345920d930SMichal Krawczyk 	adapter->max_num_io_queues = max_num_io_queues;
18352061fe41SRafal Kozik 
183633dde075SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
183733dde075SMichal Krawczyk 		disable_meta_caching =
183833dde075SMichal Krawczyk 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
183933dde075SMichal Krawczyk 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
184033dde075SMichal Krawczyk 	} else {
184133dde075SMichal Krawczyk 		disable_meta_caching = false;
184233dde075SMichal Krawczyk 	}
184333dde075SMichal Krawczyk 
18441173fca2SJan Medala 	/* prepare ring structures */
184533dde075SMichal Krawczyk 	ena_init_rings(adapter, disable_meta_caching);
18461173fca2SJan Medala 
1847372c1af5SJan Medala 	ena_config_debug_area(adapter);
1848372c1af5SJan Medala 
18491173fca2SJan Medala 	/* Set max MTU for this device */
18501173fca2SJan Medala 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
18511173fca2SJan Medala 
1852117ba4a6SMichal Krawczyk 	/* set device support for offloads */
1853117ba4a6SMichal Krawczyk 	adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
1854117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
1855117ba4a6SMichal Krawczyk 	adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
1856117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
1857ef538c1aSMichal Krawczyk 	adapter->offloads.rx_csum_supported =
1858117ba4a6SMichal Krawczyk 		(get_feat_ctx.offload.rx_supported &
1859117ba4a6SMichal Krawczyk 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
186083277a7cSJakub Palider 
18611173fca2SJan Medala 	/* Copy MAC address and point DPDK to it */
18626d13ea8eSOlivier Matz 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
1863538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)
1864538da7a1SOlivier Matz 			get_feat_ctx.dev_attr.mac_addr,
18656d13ea8eSOlivier Matz 			(struct rte_ether_addr *)adapter->mac_addr);
18661173fca2SJan Medala 
186715febafdSThomas Monjalon 	/*
186815febafdSThomas Monjalon 	 * Pass the information to the rte_eth_dev_close() that it should also
186915febafdSThomas Monjalon 	 * release the private port resources.
187015febafdSThomas Monjalon 	 */
187115febafdSThomas Monjalon 	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
187215febafdSThomas Monjalon 
18731173fca2SJan Medala 	adapter->drv_stats = rte_zmalloc("adapter stats",
18741173fca2SJan Medala 					 sizeof(*adapter->drv_stats),
18751173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
18761173fca2SJan Medala 	if (!adapter->drv_stats) {
18776f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n");
1878241da076SRafal Kozik 		rc = -ENOMEM;
1879241da076SRafal Kozik 		goto err_delete_debug_area;
18801173fca2SJan Medala 	}
18811173fca2SJan Medala 
1882eb0ef49dSMichal Krawczyk 	rte_intr_callback_register(intr_handle,
1883eb0ef49dSMichal Krawczyk 				   ena_interrupt_handler_rte,
1884eb0ef49dSMichal Krawczyk 				   adapter);
1885eb0ef49dSMichal Krawczyk 	rte_intr_enable(intr_handle);
1886eb0ef49dSMichal Krawczyk 	ena_com_set_admin_polling_mode(ena_dev, false);
1887ca148440SMichal Krawczyk 	ena_com_admin_aenq_enable(ena_dev);
1888eb0ef49dSMichal Krawczyk 
1889d9b8b106SMichal Krawczyk 	if (adapters_found == 0)
1890d9b8b106SMichal Krawczyk 		rte_timer_subsystem_init();
1891d9b8b106SMichal Krawczyk 	rte_timer_init(&adapter->timer_wd);
1892d9b8b106SMichal Krawczyk 
18931173fca2SJan Medala 	adapters_found++;
18941173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_INIT;
18951173fca2SJan Medala 
18961173fca2SJan Medala 	return 0;
1897241da076SRafal Kozik 
1898241da076SRafal Kozik err_delete_debug_area:
1899241da076SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1900241da076SRafal Kozik 
1901241da076SRafal Kozik err_device_destroy:
1902241da076SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1903241da076SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1904241da076SRafal Kozik 
1905241da076SRafal Kozik err:
1906241da076SRafal Kozik 	return rc;
19071173fca2SJan Medala }
19081173fca2SJan Medala 
1909e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev)
1910eb0ef49dSMichal Krawczyk {
1911890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
1912e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1913eb0ef49dSMichal Krawczyk 
1914e457bc70SRafal Kozik 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
1915e457bc70SRafal Kozik 		return;
1916e457bc70SRafal Kozik 
1917e457bc70SRafal Kozik 	ena_com_set_admin_running_state(ena_dev, false);
1918eb0ef49dSMichal Krawczyk 
1919eb0ef49dSMichal Krawczyk 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
1920eb0ef49dSMichal Krawczyk 		ena_close(eth_dev);
1921eb0ef49dSMichal Krawczyk 
1922e457bc70SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
1923e457bc70SRafal Kozik 	ena_com_delete_host_info(ena_dev);
1924e457bc70SRafal Kozik 
1925e457bc70SRafal Kozik 	ena_com_abort_admin_commands(ena_dev);
1926e457bc70SRafal Kozik 	ena_com_wait_for_abort_completion(ena_dev);
1927e457bc70SRafal Kozik 	ena_com_admin_destroy(ena_dev);
1928e457bc70SRafal Kozik 	ena_com_mmio_reg_read_request_destroy(ena_dev);
1929e457bc70SRafal Kozik 
1930e457bc70SRafal Kozik 	adapter->state = ENA_ADAPTER_STATE_FREE;
1931e457bc70SRafal Kozik }
1932e457bc70SRafal Kozik 
1933e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
1934e457bc70SRafal Kozik {
1935e457bc70SRafal Kozik 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1936e457bc70SRafal Kozik 		return 0;
1937e457bc70SRafal Kozik 
1938e457bc70SRafal Kozik 	ena_destroy_device(eth_dev);
1939e457bc70SRafal Kozik 
1940eb0ef49dSMichal Krawczyk 	eth_dev->dev_ops = NULL;
1941eb0ef49dSMichal Krawczyk 	eth_dev->rx_pkt_burst = NULL;
1942eb0ef49dSMichal Krawczyk 	eth_dev->tx_pkt_burst = NULL;
1943eb0ef49dSMichal Krawczyk 	eth_dev->tx_pkt_prepare = NULL;
1944eb0ef49dSMichal Krawczyk 
1945eb0ef49dSMichal Krawczyk 	return 0;
1946eb0ef49dSMichal Krawczyk }
1947eb0ef49dSMichal Krawczyk 
19481173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
19491173fca2SJan Medala {
1950890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
19517369f88fSRafal Kozik 
19521173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
19531173fca2SJan Medala 
1954a4996bd8SWei Dai 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
1955a4996bd8SWei Dai 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
19561173fca2SJan Medala 	return 0;
19571173fca2SJan Medala }
19581173fca2SJan Medala 
195933dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
196033dde075SMichal Krawczyk 			   bool disable_meta_caching)
19611173fca2SJan Medala {
19625920d930SMichal Krawczyk 	size_t i;
19631173fca2SJan Medala 
19645920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19651173fca2SJan Medala 		struct ena_ring *ring = &adapter->tx_ring[i];
19661173fca2SJan Medala 
19671173fca2SJan Medala 		ring->configured = 0;
19681173fca2SJan Medala 		ring->type = ENA_RING_TYPE_TX;
19691173fca2SJan Medala 		ring->adapter = adapter;
19701173fca2SJan Medala 		ring->id = i;
19711173fca2SJan Medala 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
19721173fca2SJan Medala 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
19732061fe41SRafal Kozik 		ring->sgl_size = adapter->max_tx_sgl_size;
197433dde075SMichal Krawczyk 		ring->disable_meta_caching = disable_meta_caching;
19751173fca2SJan Medala 	}
19761173fca2SJan Medala 
19775920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
19781173fca2SJan Medala 		struct ena_ring *ring = &adapter->rx_ring[i];
19791173fca2SJan Medala 
19801173fca2SJan Medala 		ring->configured = 0;
19811173fca2SJan Medala 		ring->type = ENA_RING_TYPE_RX;
19821173fca2SJan Medala 		ring->adapter = adapter;
19831173fca2SJan Medala 		ring->id = i;
1984ea93d37eSRafal Kozik 		ring->sgl_size = adapter->max_rx_sgl_size;
19851173fca2SJan Medala 	}
19861173fca2SJan Medala }
19871173fca2SJan Medala 
1988bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
19891173fca2SJan Medala 			  struct rte_eth_dev_info *dev_info)
19901173fca2SJan Medala {
19911173fca2SJan Medala 	struct ena_adapter *adapter;
19921173fca2SJan Medala 	struct ena_com_dev *ena_dev;
199356b8b9b7SRafal Kozik 	uint64_t rx_feat = 0, tx_feat = 0;
19941173fca2SJan Medala 
1995498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
1996498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
1997890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
19981173fca2SJan Medala 
19991173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
2000498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
20011173fca2SJan Medala 
2002e274f573SMarc Sune 	dev_info->speed_capa =
2003e274f573SMarc Sune 			ETH_LINK_SPEED_1G   |
2004e274f573SMarc Sune 			ETH_LINK_SPEED_2_5G |
2005e274f573SMarc Sune 			ETH_LINK_SPEED_5G   |
2006e274f573SMarc Sune 			ETH_LINK_SPEED_10G  |
2007e274f573SMarc Sune 			ETH_LINK_SPEED_25G  |
2008e274f573SMarc Sune 			ETH_LINK_SPEED_40G  |
2009b2feed01SThomas Monjalon 			ETH_LINK_SPEED_50G  |
2010b2feed01SThomas Monjalon 			ETH_LINK_SPEED_100G;
2011e274f573SMarc Sune 
20121173fca2SJan Medala 	/* Set Tx & Rx features available for device */
2013117ba4a6SMichal Krawczyk 	if (adapter->offloads.tso4_supported)
20141173fca2SJan Medala 		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
20151173fca2SJan Medala 
2016117ba4a6SMichal Krawczyk 	if (adapter->offloads.tx_csum_supported)
20171173fca2SJan Medala 		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
20181173fca2SJan Medala 			DEV_TX_OFFLOAD_UDP_CKSUM |
20191173fca2SJan Medala 			DEV_TX_OFFLOAD_TCP_CKSUM;
20201173fca2SJan Medala 
2021117ba4a6SMichal Krawczyk 	if (adapter->offloads.rx_csum_supported)
20221173fca2SJan Medala 		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
20231173fca2SJan Medala 			DEV_RX_OFFLOAD_UDP_CKSUM  |
20241173fca2SJan Medala 			DEV_RX_OFFLOAD_TCP_CKSUM;
20251173fca2SJan Medala 
2026a0a4ff40SRafal Kozik 	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2027a0a4ff40SRafal Kozik 
20281173fca2SJan Medala 	/* Inform framework about available features */
20291173fca2SJan Medala 	dev_info->rx_offload_capa = rx_feat;
20307369f88fSRafal Kozik 	dev_info->rx_queue_offload_capa = rx_feat;
20311173fca2SJan Medala 	dev_info->tx_offload_capa = tx_feat;
203256b8b9b7SRafal Kozik 	dev_info->tx_queue_offload_capa = tx_feat;
20331173fca2SJan Medala 
2034b01ead20SRafal Kozik 	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
2035b01ead20SRafal Kozik 					   ETH_RSS_UDP;
2036b01ead20SRafal Kozik 
20371173fca2SJan Medala 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
20381173fca2SJan Medala 	dev_info->max_rx_pktlen  = adapter->max_mtu;
20391173fca2SJan Medala 	dev_info->max_mac_addrs = 1;
20401173fca2SJan Medala 
20415920d930SMichal Krawczyk 	dev_info->max_rx_queues = adapter->max_num_io_queues;
20425920d930SMichal Krawczyk 	dev_info->max_tx_queues = adapter->max_num_io_queues;
20431173fca2SJan Medala 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
204456b8b9b7SRafal Kozik 
204556b8b9b7SRafal Kozik 	adapter->tx_supported_offloads = tx_feat;
20467369f88fSRafal Kozik 	adapter->rx_supported_offloads = rx_feat;
204792680dc2SRafal Kozik 
20485920d930SMichal Krawczyk 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
204992680dc2SRafal Kozik 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2050ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2051ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
2052ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2053ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
205492680dc2SRafal Kozik 
20555920d930SMichal Krawczyk 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
205692680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
205792680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2058ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
205992680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2060ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
2061bdad90d1SIvan Ilchenko 
2062bdad90d1SIvan Ilchenko 	return 0;
20631173fca2SJan Medala }
20641173fca2SJan Medala 
2065*1be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
2066*1be097dcSMichal Krawczyk {
2067*1be097dcSMichal Krawczyk 	mbuf->data_len = len;
2068*1be097dcSMichal Krawczyk 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2069*1be097dcSMichal Krawczyk 	mbuf->refcnt = 1;
2070*1be097dcSMichal Krawczyk 	mbuf->next = NULL;
2071*1be097dcSMichal Krawczyk }
2072*1be097dcSMichal Krawczyk 
2073*1be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
2074*1be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
2075*1be097dcSMichal Krawczyk 				    uint32_t descs,
2076*1be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
2077*1be097dcSMichal Krawczyk 				    uint8_t offset)
2078*1be097dcSMichal Krawczyk {
2079*1be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
2080*1be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf_head;
2081*1be097dcSMichal Krawczyk 	struct ena_rx_buffer *rx_info;
2082*1be097dcSMichal Krawczyk 	unsigned int ring_mask = rx_ring->ring_size - 1;
2083*1be097dcSMichal Krawczyk 	uint16_t ntc, len, req_id, buf = 0;
2084*1be097dcSMichal Krawczyk 
2085*1be097dcSMichal Krawczyk 	if (unlikely(descs == 0))
2086*1be097dcSMichal Krawczyk 		return NULL;
2087*1be097dcSMichal Krawczyk 
2088*1be097dcSMichal Krawczyk 	ntc = *next_to_clean;
2089*1be097dcSMichal Krawczyk 
2090*1be097dcSMichal Krawczyk 	len = ena_bufs[buf].len;
2091*1be097dcSMichal Krawczyk 	req_id = ena_bufs[buf].req_id;
2092*1be097dcSMichal Krawczyk 	if (unlikely(validate_rx_req_id(rx_ring, req_id)))
2093*1be097dcSMichal Krawczyk 		return NULL;
2094*1be097dcSMichal Krawczyk 
2095*1be097dcSMichal Krawczyk 	rx_info = &rx_ring->rx_buffer_info[req_id];
2096*1be097dcSMichal Krawczyk 
2097*1be097dcSMichal Krawczyk 	mbuf = rx_info->mbuf;
2098*1be097dcSMichal Krawczyk 	RTE_ASSERT(mbuf != NULL);
2099*1be097dcSMichal Krawczyk 
2100*1be097dcSMichal Krawczyk 	ena_init_rx_mbuf(mbuf, len);
2101*1be097dcSMichal Krawczyk 
2102*1be097dcSMichal Krawczyk 	/* Fill the mbuf head with the data specific to the 1st segment. */
2103*1be097dcSMichal Krawczyk 	mbuf_head = mbuf;
2104*1be097dcSMichal Krawczyk 	mbuf_head->nb_segs = descs;
2105*1be097dcSMichal Krawczyk 	mbuf_head->port = rx_ring->port_id;
2106*1be097dcSMichal Krawczyk 	mbuf_head->pkt_len = len;
2107*1be097dcSMichal Krawczyk 	mbuf_head->data_off += offset;
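	/* The device may report a payload offset within the buffer;
	 * shift data_off by that pkt_offset accordingly.
	 */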
2108*1be097dcSMichal Krawczyk 
2109*1be097dcSMichal Krawczyk 	rx_info->mbuf = NULL;
2110*1be097dcSMichal Krawczyk 	rx_ring->empty_rx_reqs[ntc & ring_mask] = req_id;
2111*1be097dcSMichal Krawczyk 	++ntc;
2112*1be097dcSMichal Krawczyk 
2113*1be097dcSMichal Krawczyk 	while (--descs) {
2114*1be097dcSMichal Krawczyk 		++buf;
2115*1be097dcSMichal Krawczyk 		len = ena_bufs[buf].len;
2116*1be097dcSMichal Krawczyk 		req_id = ena_bufs[buf].req_id;
2117*1be097dcSMichal Krawczyk 		if (unlikely(validate_rx_req_id(rx_ring, req_id))) {
2118*1be097dcSMichal Krawczyk 			rte_mbuf_raw_free(mbuf_head);
2119*1be097dcSMichal Krawczyk 			return NULL;
2120*1be097dcSMichal Krawczyk 		}
2121*1be097dcSMichal Krawczyk 
2122*1be097dcSMichal Krawczyk 		rx_info = &rx_ring->rx_buffer_info[req_id];
2123*1be097dcSMichal Krawczyk 		RTE_ASSERT(rx_info->mbuf != NULL);
2124*1be097dcSMichal Krawczyk 
2125*1be097dcSMichal Krawczyk 		/* Create an mbuf chain. */
2126*1be097dcSMichal Krawczyk 		mbuf->next = rx_info->mbuf;
2127*1be097dcSMichal Krawczyk 		mbuf = mbuf->next;
2128*1be097dcSMichal Krawczyk 
2129*1be097dcSMichal Krawczyk 		ena_init_rx_mbuf(mbuf, len);
2130*1be097dcSMichal Krawczyk 		mbuf_head->pkt_len += len;
2131*1be097dcSMichal Krawczyk 
2132*1be097dcSMichal Krawczyk 		rx_info->mbuf = NULL;
2133*1be097dcSMichal Krawczyk 		rx_ring->empty_rx_reqs[ntc & ring_mask] = req_id;
2134*1be097dcSMichal Krawczyk 		++ntc;
2135*1be097dcSMichal Krawczyk 	}
2136*1be097dcSMichal Krawczyk 
2137*1be097dcSMichal Krawczyk 	*next_to_clean = ntc;
2138*1be097dcSMichal Krawczyk 
2139*1be097dcSMichal Krawczyk 	return mbuf_head;
2140*1be097dcSMichal Krawczyk }
2141*1be097dcSMichal Krawczyk 
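/* Editor's note: Rx burst callback. It drains up to nb_pkts completed
 * packets from the Rx completion queue, rebuilds them as mbuf chains and
 * refills the submission queue in bursts (1/8 of the ring, see
 * ENA_RING_DESCS_RATIO) to save doorbells. A minimal application-side
 * sketch, with hypothetical port and queue ids:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(0, 0, pkts, RTE_DIM(pkts));
 */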
21421173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
21431173fca2SJan Medala 				  uint16_t nb_pkts)
21441173fca2SJan Medala {
21451173fca2SJan Medala 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
21461173fca2SJan Medala 	unsigned int ring_size = rx_ring->ring_size;
21471173fca2SJan Medala 	unsigned int ring_mask = ring_size - 1;
21481173fca2SJan Medala 	uint16_t next_to_clean = rx_ring->next_to_clean;
21491daff526SJakub Palider 	uint16_t desc_in_use = 0;
2150*1be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
2151*1be097dcSMichal Krawczyk 	uint16_t completed;
21521173fca2SJan Medala 	struct ena_com_rx_ctx ena_rx_ctx;
2153*1be097dcSMichal Krawczyk 	int i, rc = 0;
21541173fca2SJan Medala 
21551173fca2SJan Medala 	/* Check adapter state */
21561173fca2SJan Medala 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
21576f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
21581173fca2SJan Medala 			"Trying to receive pkts while device is NOT running\n");
21591173fca2SJan Medala 		return 0;
21601173fca2SJan Medala 	}
21611173fca2SJan Medala 
21621daff526SJakub Palider 	desc_in_use = rx_ring->next_to_use - next_to_clean;
21631173fca2SJan Medala 	if (unlikely(nb_pkts > desc_in_use))
21641173fca2SJan Medala 		nb_pkts = desc_in_use;
21651173fca2SJan Medala 
21661173fca2SJan Medala 	for (completed = 0; completed < nb_pkts; completed++) {
2167ea93d37eSRafal Kozik 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
21681173fca2SJan Medala 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
21691173fca2SJan Medala 		ena_rx_ctx.descs = 0;
21707b3a3c4bSMaciej Bielski 		ena_rx_ctx.pkt_offset = 0;
21711173fca2SJan Medala 		/* receive packet context */
21721173fca2SJan Medala 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
21731173fca2SJan Medala 				    rx_ring->ena_com_io_sq,
21741173fca2SJan Medala 				    &ena_rx_ctx);
21751173fca2SJan Medala 		if (unlikely(rc)) {
21766f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc);
21779b260dbfSRafal Kozik 			rx_ring->adapter->reset_reason =
21789b260dbfSRafal Kozik 				ENA_REGS_RESET_TOO_MANY_RX_DESCS;
2179241da076SRafal Kozik 			rx_ring->adapter->trigger_reset = true;
21807830e905SSolganik Alexander 			++rx_ring->rx_stats.bad_desc_num;
21811173fca2SJan Medala 			return 0;
21821173fca2SJan Medala 		}
21831173fca2SJan Medala 
2184*1be097dcSMichal Krawczyk 		mbuf = ena_rx_mbuf(rx_ring,
2185*1be097dcSMichal Krawczyk 			ena_rx_ctx.ena_bufs,
2186*1be097dcSMichal Krawczyk 			ena_rx_ctx.descs,
2187*1be097dcSMichal Krawczyk 			&next_to_clean,
2188*1be097dcSMichal Krawczyk 			ena_rx_ctx.pkt_offset);
2189*1be097dcSMichal Krawczyk 		if (unlikely(mbuf == NULL)) {
2190*1be097dcSMichal Krawczyk 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2191c2034976SMichal Krawczyk 				rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
2192*1be097dcSMichal Krawczyk 					rx_ring->ena_bufs[i].req_id;
2193*1be097dcSMichal Krawczyk 				++next_to_clean;
21941173fca2SJan Medala 			}
2195f00930d9SRafal Kozik 			break;
2196*1be097dcSMichal Krawczyk 		}
21971173fca2SJan Medala 
21981173fca2SJan Medala 		/* fill mbuf attributes if any */
2199*1be097dcSMichal Krawczyk 		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx);
22007830e905SSolganik Alexander 
2201*1be097dcSMichal Krawczyk 		if (unlikely(mbuf->ol_flags &
2202ef74b5f7SMichal Krawczyk 				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
2203ef74b5f7SMichal Krawczyk 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
22047830e905SSolganik Alexander 			++rx_ring->rx_stats.bad_csum;
2205ef74b5f7SMichal Krawczyk 		}
22067830e905SSolganik Alexander 
2207*1be097dcSMichal Krawczyk 		mbuf->hash.rss = ena_rx_ctx.hash;
22081173fca2SJan Medala 
2209*1be097dcSMichal Krawczyk 		rx_pkts[completed] = mbuf;
2210*1be097dcSMichal Krawczyk 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
22111173fca2SJan Medala 	}
22121173fca2SJan Medala 
2213*1be097dcSMichal Krawczyk 	rx_ring->rx_stats.cnt += completed;
2214ec78af6bSMichal Krawczyk 	rx_ring->next_to_clean = next_to_clean;
2215ec78af6bSMichal Krawczyk 
2216ec78af6bSMichal Krawczyk 	desc_in_use = desc_in_use - completed + 1;
22171173fca2SJan Medala 	/* Burst refill to save doorbells, memory barriers, const interval */
2218a45462c5SRafal Kozik 	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) {
2219a45462c5SRafal Kozik 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
22201daff526SJakub Palider 		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
2221a45462c5SRafal Kozik 	}
22221173fca2SJan Medala 
2223*1be097dcSMichal Krawczyk 	return completed;
22241173fca2SJan Medala }
22251173fca2SJan Medala 
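/* Editor's note: Tx prepare callback (rte_eth_tx_prepare). It rejects
 * offload requests the device cannot handle (e.g. SCTP checksum) with
 * rte_errno = ENOTSUP and fixes up pseudo-header checksums via
 * rte_net_intel_cksum_flags_prepare(). It returns the number of leading
 * packets that are safe to pass to eth_ena_xmit_pkts().
 */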
2226b3fc5a1aSKonstantin Ananyev static uint16_t
222783277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2228b3fc5a1aSKonstantin Ananyev 		uint16_t nb_pkts)
2229b3fc5a1aSKonstantin Ananyev {
2230b3fc5a1aSKonstantin Ananyev 	int32_t ret;
2231b3fc5a1aSKonstantin Ananyev 	uint32_t i;
2232b3fc5a1aSKonstantin Ananyev 	struct rte_mbuf *m;
223383277a7cSJakub Palider 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2234a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ip_hdr;
2235b3fc5a1aSKonstantin Ananyev 	uint64_t ol_flags;
223683277a7cSJakub Palider 	uint16_t frag_field;
223783277a7cSJakub Palider 
2238b3fc5a1aSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
2239b3fc5a1aSKonstantin Ananyev 		m = tx_pkts[i];
2240b3fc5a1aSKonstantin Ananyev 		ol_flags = m->ol_flags;
2241b3fc5a1aSKonstantin Ananyev 
2242bc5ef57dSMichal Krawczyk 		if (!(ol_flags & PKT_TX_IPV4))
2243bc5ef57dSMichal Krawczyk 			continue;
2244bc5ef57dSMichal Krawczyk 
2245bc5ef57dSMichal Krawczyk 		/* If the L2 header length was not specified, assume it is the
2246bc5ef57dSMichal Krawczyk 		 * length of the Ethernet header.
2247bc5ef57dSMichal Krawczyk 		 */
2248bc5ef57dSMichal Krawczyk 		if (unlikely(m->l2_len == 0))
22496d13ea8eSOlivier Matz 			m->l2_len = sizeof(struct rte_ether_hdr);
2250bc5ef57dSMichal Krawczyk 
2251a7c528e5SOlivier Matz 		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2252bc5ef57dSMichal Krawczyk 						 m->l2_len);
2253bc5ef57dSMichal Krawczyk 		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2254bc5ef57dSMichal Krawczyk 
225524ac604eSOlivier Matz 		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
2256bc5ef57dSMichal Krawczyk 			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2257bc5ef57dSMichal Krawczyk 
2258bc5ef57dSMichal Krawczyk 			/* If the IPv4 header has the DF flag set and TSO support
2259bc5ef57dSMichal Krawczyk 			 * is disabled, the partial checksum should not be calculated.
2260bc5ef57dSMichal Krawczyk 			 */
2261117ba4a6SMichal Krawczyk 			if (!tx_ring->adapter->offloads.tso4_supported)
2262bc5ef57dSMichal Krawczyk 				continue;
2263bc5ef57dSMichal Krawczyk 		}
2264bc5ef57dSMichal Krawczyk 
2265b3fc5a1aSKonstantin Ananyev 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
2266b3fc5a1aSKonstantin Ananyev 				(ol_flags & PKT_TX_L4_MASK) ==
2267b3fc5a1aSKonstantin Ananyev 				PKT_TX_SCTP_CKSUM) {
2268baeed5f4SMichal Krawczyk 			rte_errno = ENOTSUP;
2269b3fc5a1aSKonstantin Ananyev 			return i;
2270b3fc5a1aSKonstantin Ananyev 		}
2271b3fc5a1aSKonstantin Ananyev 
2272b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2273b3fc5a1aSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
2274b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2275baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2276b3fc5a1aSKonstantin Ananyev 			return i;
2277b3fc5a1aSKonstantin Ananyev 		}
2278b3fc5a1aSKonstantin Ananyev #endif
227983277a7cSJakub Palider 
228083277a7cSJakub Palider 		/* If TSO is requested and the DF flag is not set (DF=0),
228183277a7cSJakub Palider 		 * the hardware must be provided with a partial checksum;
228283277a7cSJakub Palider 		 * otherwise it takes care of the necessary calculations.
228383277a7cSJakub Palider 		 */
228483277a7cSJakub Palider 
2285b3fc5a1aSKonstantin Ananyev 		ret = rte_net_intel_cksum_flags_prepare(m,
2286b3fc5a1aSKonstantin Ananyev 			ol_flags & ~PKT_TX_TCP_SEG);
2287b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2288baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2289b3fc5a1aSKonstantin Ananyev 			return i;
2290b3fc5a1aSKonstantin Ananyev 		}
2291b3fc5a1aSKonstantin Ananyev 	}
2292b3fc5a1aSKonstantin Ananyev 
2293b3fc5a1aSKonstantin Ananyev 	return i;
2294b3fc5a1aSKonstantin Ananyev }
2295b3fc5a1aSKonstantin Ananyev 
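/* Editor's note: apply timeout hints delivered by the device over AENQ.
 * Admin completion and MMIO read timeouts arrive in msecs and are stored in
 * usecs; the keep-alive watchdog timeout is converted from msecs to timer
 * ticks.
 */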
2296f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter,
2297f01f060cSRafal Kozik 			     struct ena_admin_ena_hw_hints *hints)
2298f01f060cSRafal Kozik {
2299f01f060cSRafal Kozik 	if (hints->admin_completion_tx_timeout)
2300f01f060cSRafal Kozik 		adapter->ena_dev.admin_queue.completion_timeout =
2301f01f060cSRafal Kozik 			hints->admin_completion_tx_timeout * 1000;
2302f01f060cSRafal Kozik 
2303f01f060cSRafal Kozik 	if (hints->mmio_read_timeout)
2304f01f060cSRafal Kozik 		/* convert to usec */
2305f01f060cSRafal Kozik 		adapter->ena_dev.mmio_read.reg_read_to =
2306f01f060cSRafal Kozik 			hints->mmio_read_timeout * 1000;
2307d9b8b106SMichal Krawczyk 
2308d9b8b106SMichal Krawczyk 	if (hints->driver_watchdog_timeout) {
2309d9b8b106SMichal Krawczyk 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2310d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2311d9b8b106SMichal Krawczyk 		else
2312d9b8b106SMichal Krawczyk 			/* Convert msecs to ticks */
2313d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout =
2314d9b8b106SMichal Krawczyk 				(hints->driver_watchdog_timeout *
2315d9b8b106SMichal Krawczyk 				rte_get_timer_hz()) / 1000;
2316d9b8b106SMichal Krawczyk 	}
2317f01f060cSRafal Kozik }
2318f01f060cSRafal Kozik 
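/* Editor's note: linearize an mbuf (copy it into one contiguous segment)
 * when its segment count would overflow the Tx SGL. With the LLQ placement
 * policy, a chain that exactly fills the SGL is still accepted as long as
 * its first segment fits below tx_max_header_size.
 */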
23192061fe41SRafal Kozik static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
23202061fe41SRafal Kozik 					struct rte_mbuf *mbuf)
23212061fe41SRafal Kozik {
23222fca2a98SMichal Krawczyk 	struct ena_com_dev *ena_dev;
23232fca2a98SMichal Krawczyk 	int num_segments, header_len, rc;
23242061fe41SRafal Kozik 
23252fca2a98SMichal Krawczyk 	ena_dev = &tx_ring->adapter->ena_dev;
23262061fe41SRafal Kozik 	num_segments = mbuf->nb_segs;
23272fca2a98SMichal Krawczyk 	header_len = mbuf->data_len;
23282061fe41SRafal Kozik 
23292061fe41SRafal Kozik 	if (likely(num_segments < tx_ring->sgl_size))
23302061fe41SRafal Kozik 		return 0;
23312061fe41SRafal Kozik 
23322fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
23332fca2a98SMichal Krawczyk 	    (num_segments == tx_ring->sgl_size) &&
23342fca2a98SMichal Krawczyk 	    (header_len < tx_ring->tx_max_header_size))
23352fca2a98SMichal Krawczyk 		return 0;
23362fca2a98SMichal Krawczyk 
23377830e905SSolganik Alexander 	++tx_ring->tx_stats.linearize;
23382061fe41SRafal Kozik 	rc = rte_pktmbuf_linearize(mbuf);
23397830e905SSolganik Alexander 	if (unlikely(rc)) {
23406f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
23417830e905SSolganik Alexander 		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
23427830e905SSolganik Alexander 		++tx_ring->tx_stats.linearize_failed;
23437830e905SSolganik Alexander 		return rc;
23447830e905SSolganik Alexander 	}
23452061fe41SRafal Kozik 
23462061fe41SRafal Kozik 	return rc;
23472061fe41SRafal Kozik }
23482061fe41SRafal Kozik 
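/* Editor's note: Tx burst callback. For each mbuf a Tx context is built;
 * with the LLQ placement policy up to tx_max_header_size bytes of the
 * header are pushed inline (copied through push_buf_intermediate_buf if
 * they span segments) and the buffer descriptors are adjusted to skip the
 * pushed bytes. One doorbell covers the whole burst, after which completed
 * requests are reaped, their mbuf chains freed and req_ids recycled,
 * bounded by ENA_RING_DESCS_RATIO(ring_size) descriptors per call.
 */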
23491173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
23501173fca2SJan Medala 				  uint16_t nb_pkts)
23511173fca2SJan Medala {
23521173fca2SJan Medala 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
23531daff526SJakub Palider 	uint16_t next_to_use = tx_ring->next_to_use;
23541daff526SJakub Palider 	uint16_t next_to_clean = tx_ring->next_to_clean;
23551173fca2SJan Medala 	struct rte_mbuf *mbuf;
23562fca2a98SMichal Krawczyk 	uint16_t seg_len;
23571173fca2SJan Medala 	unsigned int ring_size = tx_ring->ring_size;
23581173fca2SJan Medala 	unsigned int ring_mask = ring_size - 1;
23591173fca2SJan Medala 	struct ena_com_tx_ctx ena_tx_ctx;
23601173fca2SJan Medala 	struct ena_tx_buffer *tx_info;
23611173fca2SJan Medala 	struct ena_com_buf *ebuf;
23621173fca2SJan Medala 	uint16_t rc, req_id, total_tx_descs = 0;
2363b66b6e72SJakub Palider 	uint16_t sent_idx = 0, empty_tx_reqs;
23642fca2a98SMichal Krawczyk 	uint16_t push_len = 0;
23652fca2a98SMichal Krawczyk 	uint16_t delta = 0;
23661173fca2SJan Medala 	int nb_hw_desc;
236745b6d861SMichal Krawczyk 	uint32_t total_length;
23681173fca2SJan Medala 
23691173fca2SJan Medala 	/* Check adapter state */
23701173fca2SJan Medala 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
23716f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ALERT,
23721173fca2SJan Medala 			"Trying to xmit pkts while device is NOT running\n");
23731173fca2SJan Medala 		return 0;
23741173fca2SJan Medala 	}
23751173fca2SJan Medala 
2376b66b6e72SJakub Palider 	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
2377b66b6e72SJakub Palider 	if (nb_pkts > empty_tx_reqs)
2378b66b6e72SJakub Palider 		nb_pkts = empty_tx_reqs;
2379b66b6e72SJakub Palider 
23801173fca2SJan Medala 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
23811173fca2SJan Medala 		mbuf = tx_pkts[sent_idx];
238245b6d861SMichal Krawczyk 		total_length = 0;
23831173fca2SJan Medala 
23842061fe41SRafal Kozik 		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
23852061fe41SRafal Kozik 		if (unlikely(rc))
23862061fe41SRafal Kozik 			break;
23872061fe41SRafal Kozik 
23881daff526SJakub Palider 		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
23891173fca2SJan Medala 		tx_info = &tx_ring->tx_buffer_info[req_id];
23901173fca2SJan Medala 		tx_info->mbuf = mbuf;
23911173fca2SJan Medala 		tx_info->num_of_bufs = 0;
23921173fca2SJan Medala 		ebuf = tx_info->bufs;
23931173fca2SJan Medala 
23941173fca2SJan Medala 		/* Prepare TX context */
23951173fca2SJan Medala 		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
23961173fca2SJan Medala 		memset(&ena_tx_ctx.ena_meta, 0x0,
23971173fca2SJan Medala 		       sizeof(struct ena_com_tx_meta));
23981173fca2SJan Medala 		ena_tx_ctx.ena_bufs = ebuf;
23991173fca2SJan Medala 		ena_tx_ctx.req_id = req_id;
24002fca2a98SMichal Krawczyk 
24012fca2a98SMichal Krawczyk 		delta = 0;
24022fca2a98SMichal Krawczyk 		seg_len = mbuf->data_len;
24032fca2a98SMichal Krawczyk 
24041173fca2SJan Medala 		if (tx_ring->tx_mem_queue_type ==
24051173fca2SJan Medala 				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
24062fca2a98SMichal Krawczyk 			push_len = RTE_MIN(mbuf->pkt_len,
24071173fca2SJan Medala 					   tx_ring->tx_max_header_size);
24082fca2a98SMichal Krawczyk 			ena_tx_ctx.header_len = push_len;
24092fca2a98SMichal Krawczyk 
24102fca2a98SMichal Krawczyk 			if (likely(push_len <= seg_len)) {
24112fca2a98SMichal Krawczyk 				/* If the push header is in the single segment,
24122fca2a98SMichal Krawczyk 				 * then just point it to the 1st mbuf data.
24132fca2a98SMichal Krawczyk 				 */
24141173fca2SJan Medala 				ena_tx_ctx.push_header =
24152fca2a98SMichal Krawczyk 					rte_pktmbuf_mtod(mbuf, uint8_t *);
24162fca2a98SMichal Krawczyk 			} else {
24172fca2a98SMichal Krawczyk 			/* If the push header lies across several
24182fca2a98SMichal Krawczyk 				 * segments, copy it to the intermediate buffer.
24192fca2a98SMichal Krawczyk 				 */
24202fca2a98SMichal Krawczyk 				rte_pktmbuf_read(mbuf, 0, push_len,
24212fca2a98SMichal Krawczyk 					tx_ring->push_buf_intermediate_buf);
24222fca2a98SMichal Krawczyk 				ena_tx_ctx.push_header =
24232fca2a98SMichal Krawczyk 					tx_ring->push_buf_intermediate_buf;
24242fca2a98SMichal Krawczyk 				delta = push_len - seg_len;
24252fca2a98SMichal Krawczyk 			}
24261173fca2SJan Medala 		} /* there's no else as we take advantage of memset zeroing */
24271173fca2SJan Medala 
24281173fca2SJan Medala 		/* Set TX offloads flags, if applicable */
242933dde075SMichal Krawczyk 		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
243033dde075SMichal Krawczyk 			tx_ring->disable_meta_caching);
24311173fca2SJan Medala 
24321173fca2SJan Medala 		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);
24331173fca2SJan Medala 
24341173fca2SJan Medala 		/* Process the first segment, taking the
24351173fca2SJan Medala 		 * pushed header into account.
24361173fca2SJan Medala 		 */
24372fca2a98SMichal Krawczyk 		if (seg_len > push_len) {
2438455da545SSantosh Shukla 			ebuf->paddr = mbuf->buf_iova +
24391173fca2SJan Medala 				      mbuf->data_off +
24402fca2a98SMichal Krawczyk 				      push_len;
24412fca2a98SMichal Krawczyk 			ebuf->len = seg_len - push_len;
24421173fca2SJan Medala 			ebuf++;
24431173fca2SJan Medala 			tx_info->num_of_bufs++;
24441173fca2SJan Medala 		}
244545b6d861SMichal Krawczyk 		total_length += mbuf->data_len;
24461173fca2SJan Medala 
24471173fca2SJan Medala 		while ((mbuf = mbuf->next) != NULL) {
24482fca2a98SMichal Krawczyk 			seg_len = mbuf->data_len;
24492fca2a98SMichal Krawczyk 
24502fca2a98SMichal Krawczyk 			/* Skip mbufs if whole data is pushed as a header */
24512fca2a98SMichal Krawczyk 			if (unlikely(delta > seg_len)) {
24522fca2a98SMichal Krawczyk 				delta -= seg_len;
24532fca2a98SMichal Krawczyk 				continue;
24542fca2a98SMichal Krawczyk 			}
24552fca2a98SMichal Krawczyk 
24562fca2a98SMichal Krawczyk 			ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
24572fca2a98SMichal Krawczyk 			ebuf->len = seg_len - delta;
245845b6d861SMichal Krawczyk 			total_length += ebuf->len;
24591173fca2SJan Medala 			ebuf++;
24601173fca2SJan Medala 			tx_info->num_of_bufs++;
24612fca2a98SMichal Krawczyk 
24622fca2a98SMichal Krawczyk 			delta = 0;
24631173fca2SJan Medala 		}
24641173fca2SJan Medala 
24651173fca2SJan Medala 		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
24661173fca2SJan Medala 
2467c7519ea5SRafal Kozik 		if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
2468c7519ea5SRafal Kozik 					       &ena_tx_ctx)) {
24696f1c9df9SStephen Hemminger 			PMD_DRV_LOG(DEBUG, "llq tx max burst size of queue %d"
2470c7519ea5SRafal Kozik 				" achieved, writing doorbell to send burst\n",
2471c7519ea5SRafal Kozik 				tx_ring->id);
2472c7519ea5SRafal Kozik 			ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2473c7519ea5SRafal Kozik 		}
2474c7519ea5SRafal Kozik 
2475c7519ea5SRafal Kozik 		/* Prepare the packet's descriptors for the DMA engine. */
24761173fca2SJan Medala 		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
24771173fca2SJan Medala 					&ena_tx_ctx, &nb_hw_desc);
24787830e905SSolganik Alexander 		if (unlikely(rc)) {
24797830e905SSolganik Alexander 			++tx_ring->tx_stats.prepare_ctx_err;
24801173fca2SJan Medala 			break;
24817830e905SSolganik Alexander 		}
24821173fca2SJan Medala 		tx_info->tx_descs = nb_hw_desc;
24831173fca2SJan Medala 
24841daff526SJakub Palider 		next_to_use++;
24855673e285SRafal Kozik 		tx_ring->tx_stats.cnt++;
248645b6d861SMichal Krawczyk 		tx_ring->tx_stats.bytes += total_length;
24871173fca2SJan Medala 	}
24887830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2489b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
24901173fca2SJan Medala 
24915e02e19eSJan Medala 	/* If there are ready packets to be xmitted... */
24925e02e19eSJan Medala 	if (sent_idx > 0) {
24935e02e19eSJan Medala 		/* ...let HW do its best :-) */
24941173fca2SJan Medala 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
249545b6d861SMichal Krawczyk 		tx_ring->tx_stats.doorbells++;
24965e02e19eSJan Medala 		tx_ring->next_to_use = next_to_use;
24975e02e19eSJan Medala 	}
24985e02e19eSJan Medala 
24991173fca2SJan Medala 	/* Clear completed packets */
25001173fca2SJan Medala 	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
2501f7d82d24SRafal Kozik 		rc = validate_tx_req_id(tx_ring, req_id);
2502f7d82d24SRafal Kozik 		if (rc)
2503f7d82d24SRafal Kozik 			break;
2504f7d82d24SRafal Kozik 
25051173fca2SJan Medala 		/* Get Tx info & store how many descs were processed  */
25061173fca2SJan Medala 		tx_info = &tx_ring->tx_buffer_info[req_id];
25071173fca2SJan Medala 		total_tx_descs += tx_info->tx_descs;
25081173fca2SJan Medala 
25091173fca2SJan Medala 		/* Free whole mbuf chain  */
25101173fca2SJan Medala 		mbuf = tx_info->mbuf;
25111173fca2SJan Medala 		rte_pktmbuf_free(mbuf);
2512207a514cSMichal Krawczyk 		tx_info->mbuf = NULL;
25131173fca2SJan Medala 
25141173fca2SJan Medala 		/* Put back descriptor to the ring for reuse */
25151daff526SJakub Palider 		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
25161daff526SJakub Palider 		next_to_clean++;
25171173fca2SJan Medala 
25181173fca2SJan Medala 		/* If too many descs to clean, leave it for another run */
25191173fca2SJan Medala 		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
25201173fca2SJan Medala 			break;
25211173fca2SJan Medala 	}
25227830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
2523b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
25241173fca2SJan Medala 
25255e02e19eSJan Medala 	if (total_tx_descs > 0) {
25261173fca2SJan Medala 		/* acknowledge completion of sent packets */
25271daff526SJakub Palider 		tx_ring->next_to_clean = next_to_clean;
2528a45462c5SRafal Kozik 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
2529a45462c5SRafal Kozik 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
25305e02e19eSJan Medala 	}
25315e02e19eSJan Medala 
25327830e905SSolganik Alexander 	tx_ring->tx_stats.tx_poll++;
25337830e905SSolganik Alexander 
25341173fca2SJan Medala 	return sent_idx;
25351173fca2SJan Medala }
25361173fca2SJan Medala 
25377830e905SSolganik Alexander /**
25387830e905SSolganik Alexander  * DPDK callback to retrieve names of extended device statistics
25397830e905SSolganik Alexander  *
25407830e905SSolganik Alexander  * @param dev
25417830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
25427830e905SSolganik Alexander  * @param[out] xstats_names
25437830e905SSolganik Alexander  *   Buffer to insert names into.
25447830e905SSolganik Alexander  * @param n
25457830e905SSolganik Alexander  *   Number of names.
25467830e905SSolganik Alexander  *
25477830e905SSolganik Alexander  * @return
25487830e905SSolganik Alexander  *   Number of xstats names.
25497830e905SSolganik Alexander  */
25507830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
25517830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
25527830e905SSolganik Alexander 				unsigned int n)
25537830e905SSolganik Alexander {
25547830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
25557830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
25567830e905SSolganik Alexander 
25577830e905SSolganik Alexander 	if (n < xstats_count || !xstats_names)
25587830e905SSolganik Alexander 		return xstats_count;
25597830e905SSolganik Alexander 
25607830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
25617830e905SSolganik Alexander 		strcpy(xstats_names[count].name,
25627830e905SSolganik Alexander 			ena_stats_global_strings[stat].name);
25637830e905SSolganik Alexander 
25647830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
25657830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
25667830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
25677830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
25687830e905SSolganik Alexander 				"rx_q%d_%s", i,
25697830e905SSolganik Alexander 				ena_stats_rx_strings[stat].name);
25707830e905SSolganik Alexander 
25717830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
25727830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
25737830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
25747830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
25757830e905SSolganik Alexander 				"tx_q%d_%s", i,
25767830e905SSolganik Alexander 				ena_stats_tx_strings[stat].name);
25777830e905SSolganik Alexander 
25787830e905SSolganik Alexander 	return xstats_count;
25797830e905SSolganik Alexander }
25807830e905SSolganik Alexander 
25817830e905SSolganik Alexander /**
25827830e905SSolganik Alexander  * DPDK callback to get extended device statistics.
25837830e905SSolganik Alexander  *
25847830e905SSolganik Alexander  * @param dev
25857830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
25867830e905SSolganik Alexander  * @param[out] stats
25877830e905SSolganik Alexander  *   Stats table output buffer.
25887830e905SSolganik Alexander  * @param n
25897830e905SSolganik Alexander  *   The size of the stats table.
25907830e905SSolganik Alexander  *
25917830e905SSolganik Alexander  * @return
25927830e905SSolganik Alexander  *   Number of xstats on success, negative on failure.
25937830e905SSolganik Alexander  */
25947830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
25957830e905SSolganik Alexander 			  struct rte_eth_xstat *xstats,
25967830e905SSolganik Alexander 			  unsigned int n)
25977830e905SSolganik Alexander {
2598890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
25997830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
26007830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
26017830e905SSolganik Alexander 	int stat_offset;
26027830e905SSolganik Alexander 	void *stats_begin;
26037830e905SSolganik Alexander 
26047830e905SSolganik Alexander 	if (n < xstats_count)
26057830e905SSolganik Alexander 		return xstats_count;
26067830e905SSolganik Alexander 
26077830e905SSolganik Alexander 	if (!xstats)
26087830e905SSolganik Alexander 		return 0;
26097830e905SSolganik Alexander 
26107830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
26117830e905SSolganik Alexander 		stat_offset = ena_stats_global_strings[stat].stat_offset;
26127830e905SSolganik Alexander 		stats_begin = &adapter->dev_stats;
26137830e905SSolganik Alexander 
26147830e905SSolganik Alexander 		xstats[count].id = count;
26157830e905SSolganik Alexander 		xstats[count].value = *((uint64_t *)
26167830e905SSolganik Alexander 			((char *)stats_begin + stat_offset));
26177830e905SSolganik Alexander 	}
26187830e905SSolganik Alexander 
26197830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
26207830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
26217830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
26227830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
26237830e905SSolganik Alexander 
26247830e905SSolganik Alexander 			xstats[count].id = count;
26257830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
26267830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
26277830e905SSolganik Alexander 		}
26287830e905SSolganik Alexander 	}
26297830e905SSolganik Alexander 
26307830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
26317830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
26327830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
26337830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
26347830e905SSolganik Alexander 
26357830e905SSolganik Alexander 			xstats[count].id = count;
26367830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
26377830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
26387830e905SSolganik Alexander 		}
26397830e905SSolganik Alexander 	}
26407830e905SSolganik Alexander 
26417830e905SSolganik Alexander 	return count;
26427830e905SSolganik Alexander }
26437830e905SSolganik Alexander 
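/* Editor's note: resolve xstats by id. Ids follow the layout produced by
 * ena_xstats_get_names(): global stats first, then per-Rx-queue stats
 * grouped by statistic, then per-Tx-queue stats. Returns the number of ids
 * that could be resolved.
 */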
26447830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
26457830e905SSolganik Alexander 				const uint64_t *ids,
26467830e905SSolganik Alexander 				uint64_t *values,
26477830e905SSolganik Alexander 				unsigned int n)
26487830e905SSolganik Alexander {
2649890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
26507830e905SSolganik Alexander 	uint64_t id;
26517830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
26527830e905SSolganik Alexander 	unsigned int i;
26537830e905SSolganik Alexander 	int qid;
26547830e905SSolganik Alexander 	int valid = 0;
26557830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
26567830e905SSolganik Alexander 		id = ids[i];
26577830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
26587830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
26597830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
26607830e905SSolganik Alexander 			++valid;
26617830e905SSolganik Alexander 			continue;
26627830e905SSolganik Alexander 		}
26637830e905SSolganik Alexander 
26647830e905SSolganik Alexander 		/* Check if id belongs to rx queue statistics */
26657830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
26667830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
26677830e905SSolganik Alexander 		if (id < rx_entries) {
26687830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
26697830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
26707830e905SSolganik Alexander 			values[i] = *((uint64_t *)
26717830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
26727830e905SSolganik Alexander 			++valid;
26737830e905SSolganik Alexander 			continue;
26747830e905SSolganik Alexander 		}
26757830e905SSolganik Alexander 		/* Check if id belongs to tx queue statistics */
26767830e905SSolganik Alexander 		id -= rx_entries;
26777830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
26787830e905SSolganik Alexander 		if (id < tx_entries) {
26797830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
26807830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
26817830e905SSolganik Alexander 			values[i] = *((uint64_t *)
26827830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
26837830e905SSolganik Alexander 			++valid;
26847830e905SSolganik Alexander 			continue;
26857830e905SSolganik Alexander 		}
26867830e905SSolganik Alexander 	}
26877830e905SSolganik Alexander 
26887830e905SSolganik Alexander 	return valid;
26897830e905SSolganik Alexander }
26907830e905SSolganik Alexander 
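/* Editor's note: rte_kvargs_process() callback that parses a boolean
 * devarg given as "0" or "1" and stores the result in the matching
 * ena_adapter field.
 */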
26918a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key,
26928a7a73f2SMichal Krawczyk 				   const char *value,
26938a7a73f2SMichal Krawczyk 				   void *opaque)
26948a7a73f2SMichal Krawczyk {
26958a7a73f2SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
26968a7a73f2SMichal Krawczyk 	bool bool_value;
26978a7a73f2SMichal Krawczyk 
26988a7a73f2SMichal Krawczyk 	/* Parse the value. */
26998a7a73f2SMichal Krawczyk 	if (strcmp(value, "1") == 0) {
27008a7a73f2SMichal Krawczyk 		bool_value = true;
27018a7a73f2SMichal Krawczyk 	} else if (strcmp(value, "0") == 0) {
27028a7a73f2SMichal Krawczyk 		bool_value = false;
27038a7a73f2SMichal Krawczyk 	} else {
27048a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR,
27058a7a73f2SMichal Krawczyk 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
27068a7a73f2SMichal Krawczyk 			value, key);
27078a7a73f2SMichal Krawczyk 		return -EINVAL;
27088a7a73f2SMichal Krawczyk 	}
27098a7a73f2SMichal Krawczyk 
27108a7a73f2SMichal Krawczyk 	/* Now, assign it to the proper adapter field. */
27118a7a73f2SMichal Krawczyk 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
27128a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr = bool_value;
27138a7a73f2SMichal Krawczyk 
27148a7a73f2SMichal Krawczyk 	return 0;
27158a7a73f2SMichal Krawczyk }
27168a7a73f2SMichal Krawczyk 
27178a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter,
27188a7a73f2SMichal Krawczyk 			     struct rte_devargs *devargs)
27198a7a73f2SMichal Krawczyk {
27208a7a73f2SMichal Krawczyk 	static const char * const allowed_args[] = {
27218a7a73f2SMichal Krawczyk 		ENA_DEVARG_LARGE_LLQ_HDR,
27228a7a73f2SMichal Krawczyk 	};
27238a7a73f2SMichal Krawczyk 	struct rte_kvargs *kvlist;
27248a7a73f2SMichal Krawczyk 	int rc;
27258a7a73f2SMichal Krawczyk 
27268a7a73f2SMichal Krawczyk 	if (devargs == NULL)
27278a7a73f2SMichal Krawczyk 		return 0;
27288a7a73f2SMichal Krawczyk 
27298a7a73f2SMichal Krawczyk 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
27308a7a73f2SMichal Krawczyk 	if (kvlist == NULL) {
27318a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
27328a7a73f2SMichal Krawczyk 			devargs->args);
27338a7a73f2SMichal Krawczyk 		return -EINVAL;
27348a7a73f2SMichal Krawczyk 	}
27358a7a73f2SMichal Krawczyk 
27368a7a73f2SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
27378a7a73f2SMichal Krawczyk 		ena_process_bool_devarg, adapter);
27388a7a73f2SMichal Krawczyk 
27398a7a73f2SMichal Krawczyk 	rte_kvargs_free(kvlist);
27408a7a73f2SMichal Krawczyk 
27418a7a73f2SMichal Krawczyk 	return rc;
27428a7a73f2SMichal Krawczyk }
27438a7a73f2SMichal Krawczyk 
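/* Editor's note: the only devarg accepted so far is
 * ENA_DEVARG_LARGE_LLQ_HDR (see the RTE_PMD_REGISTER_PARAM_STRING() entry
 * below). A hypothetical EAL command line enabling it for one device could
 * look like:
 *
 *	./dpdk-app -w 00:06.0,large_llq_hdr=1 -- ...
 */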
2744ca148440SMichal Krawczyk /*********************************************************************
2745ca148440SMichal Krawczyk  *  PMD configuration
2746ca148440SMichal Krawczyk  *********************************************************************/
2747fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2748fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
2749fdf91e0fSJan Blunck {
2750fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
2751fdf91e0fSJan Blunck 		sizeof(struct ena_adapter), eth_ena_dev_init);
2752fdf91e0fSJan Blunck }
2753fdf91e0fSJan Blunck 
2754fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
2755fdf91e0fSJan Blunck {
2756eb0ef49dSMichal Krawczyk 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
2757fdf91e0fSJan Blunck }
2758fdf91e0fSJan Blunck 
2759fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = {
27601173fca2SJan Medala 	.id_table = pci_id_ena_map,
276105e0eee0SRafal Kozik 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
276205e0eee0SRafal Kozik 		     RTE_PCI_DRV_WC_ACTIVATE,
2763fdf91e0fSJan Blunck 	.probe = eth_ena_pci_probe,
2764fdf91e0fSJan Blunck 	.remove = eth_ena_pci_remove,
27651173fca2SJan Medala };
27661173fca2SJan Medala 
2767fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
276801f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
276906e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
27708a7a73f2SMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");
27718bc0acaeSStephen Hemminger 
2772f8e99896SThomas Monjalon RTE_INIT(ena_init_log)
27738bc0acaeSStephen Hemminger {
27743f111952SHarry van Haaren 	ena_logtype_init = rte_log_register("pmd.net.ena.init");
27758bc0acaeSStephen Hemminger 	if (ena_logtype_init >= 0)
27768bc0acaeSStephen Hemminger 		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
27773f111952SHarry van Haaren 	ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
27788bc0acaeSStephen Hemminger 	if (ena_logtype_driver >= 0)
27798bc0acaeSStephen Hemminger 		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
27806f1c9df9SStephen Hemminger 
27816f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_RX
27826f1c9df9SStephen Hemminger 	ena_logtype_rx = rte_log_register("pmd.net.ena.rx");
27836f1c9df9SStephen Hemminger 	if (ena_logtype_rx >= 0)
27846f1c9df9SStephen Hemminger 		rte_log_set_level(ena_logtype_rx, RTE_LOG_NOTICE);
27856f1c9df9SStephen Hemminger #endif
27866f1c9df9SStephen Hemminger 
27876f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX
27886f1c9df9SStephen Hemminger 	ena_logtype_tx = rte_log_register("pmd.net.ena.tx");
27896f1c9df9SStephen Hemminger 	if (ena_logtype_tx >= 0)
27906f1c9df9SStephen Hemminger 		rte_log_set_level(ena_logtype_tx, RTE_LOG_NOTICE);
27916f1c9df9SStephen Hemminger #endif
27926f1c9df9SStephen Hemminger 
27936f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
27946f1c9df9SStephen Hemminger 	ena_logtype_tx_free = rte_log_register("pmd.net.ena.tx_free");
27956f1c9df9SStephen Hemminger 	if (ena_logtype_tx_free >= 0)
27966f1c9df9SStephen Hemminger 		rte_log_set_level(ena_logtype_tx_free, RTE_LOG_NOTICE);
27976f1c9df9SStephen Hemminger #endif
27986f1c9df9SStephen Hemminger 
27996f1c9df9SStephen Hemminger #ifdef RTE_LIBRTE_ENA_COM_DEBUG
28006f1c9df9SStephen Hemminger 	ena_logtype_com = rte_log_register("pmd.net.ena.com");
28016f1c9df9SStephen Hemminger 	if (ena_logtype_com >= 0)
28026f1c9df9SStephen Hemminger 		rte_log_set_level(ena_logtype_com, RTE_LOG_NOTICE);
28036f1c9df9SStephen Hemminger #endif
28048bc0acaeSStephen Hemminger }
28053adcba9aSMichal Krawczyk 
28063adcba9aSMichal Krawczyk /******************************************************************************
28073adcba9aSMichal Krawczyk  ******************************** AENQ Handlers *******************************
28083adcba9aSMichal Krawczyk  *****************************************************************************/
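/* Editor's note: the handlers below are dispatched from the admin
 * interrupt path when the device posts an asynchronous event; they should
 * stay short and non-blocking.
 */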
2809ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data,
2810ca148440SMichal Krawczyk 				      struct ena_admin_aenq_entry *aenq_e)
2811ca148440SMichal Krawczyk {
2812ca148440SMichal Krawczyk 	struct rte_eth_dev *eth_dev;
2813ca148440SMichal Krawczyk 	struct ena_adapter *adapter;
2814ca148440SMichal Krawczyk 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
2815ca148440SMichal Krawczyk 	uint32_t status;
2816ca148440SMichal Krawczyk 
2817890728ffSStephen Hemminger 	adapter = adapter_data;
2818ca148440SMichal Krawczyk 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
2819ca148440SMichal Krawczyk 	eth_dev = adapter->rte_dev;
2820ca148440SMichal Krawczyk 
2821ca148440SMichal Krawczyk 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
2822ca148440SMichal Krawczyk 	adapter->link_status = status;
2823ca148440SMichal Krawczyk 
2824ca148440SMichal Krawczyk 	ena_link_update(eth_dev, 0);
2825ca148440SMichal Krawczyk 	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2826ca148440SMichal Krawczyk }
2827ca148440SMichal Krawczyk 
2828f01f060cSRafal Kozik static void ena_notification(void *data,
2829f01f060cSRafal Kozik 			     struct ena_admin_aenq_entry *aenq_e)
2830f01f060cSRafal Kozik {
2831890728ffSStephen Hemminger 	struct ena_adapter *adapter = data;
2832f01f060cSRafal Kozik 	struct ena_admin_ena_hw_hints *hints;
2833f01f060cSRafal Kozik 
2834f01f060cSRafal Kozik 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
28356f1c9df9SStephen Hemminger 		PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n",
2836f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.group,
2837f01f060cSRafal Kozik 			ENA_ADMIN_NOTIFICATION);
2838f01f060cSRafal Kozik 
2839f01f060cSRafal Kozik 	switch (aenq_e->aenq_common_desc.syndrom) {
2840f01f060cSRafal Kozik 	case ENA_ADMIN_UPDATE_HINTS:
2841f01f060cSRafal Kozik 		hints = (struct ena_admin_ena_hw_hints *)
2842f01f060cSRafal Kozik 			(&aenq_e->inline_data_w4);
2843f01f060cSRafal Kozik 		ena_update_hints(adapter, hints);
2844f01f060cSRafal Kozik 		break;
2845f01f060cSRafal Kozik 	default:
28466f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome %d\n",
2847f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.syndrom);
2848f01f060cSRafal Kozik 	}
2849f01f060cSRafal Kozik }
2850f01f060cSRafal Kozik 
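/* Editor's note: keep-alive AENQ handler. It refreshes the watchdog
 * timestamp and latches the 64-bit Rx/Tx drop counters reported by the
 * device (split into high/low 32-bit halves in the descriptor) into the
 * driver statistics.
 */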
2851d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
2852d9b8b106SMichal Krawczyk 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
2853d9b8b106SMichal Krawczyk {
2854890728ffSStephen Hemminger 	struct ena_adapter *adapter = adapter_data;
285594c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
285694c3e376SRafal Kozik 	uint64_t rx_drops;
2857e1e73e32SMichal Krawczyk 	uint64_t tx_drops;
2858d9b8b106SMichal Krawczyk 
2859d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
286094c3e376SRafal Kozik 
286194c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
286294c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2863e1e73e32SMichal Krawczyk 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2864e1e73e32SMichal Krawczyk 
2865e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = rx_drops;
2866e1e73e32SMichal Krawczyk 	adapter->dev_stats.tx_drops = tx_drops;
2867d9b8b106SMichal Krawczyk }
2868d9b8b106SMichal Krawczyk 
28693adcba9aSMichal Krawczyk /**
28703adcba9aSMichal Krawczyk  * This handler will be called for an unknown event group or unimplemented handlers.
28713adcba9aSMichal Krawczyk  **/
28723adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
28733adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
28743adcba9aSMichal Krawczyk {
28756f1c9df9SStephen Hemminger 	PMD_DRV_LOG(ERR, "Unknown event was received or event with "
2876983cce2dSRafal Kozik 			  "unimplemented handler\n");
28773adcba9aSMichal Krawczyk }
28783adcba9aSMichal Krawczyk 
2879ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
28803adcba9aSMichal Krawczyk 	.handlers = {
2881ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
2882f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
2883d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
28843adcba9aSMichal Krawczyk 	},
28853adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
28863adcba9aSMichal Krawczyk };
2887