xref: /dpdk/drivers/net/ena/ena_ethdev.c (revision dbbdeb8b47f7531f9dc6a315cfadf8b900e7c635)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	6
#define DRV_MODULE_VER_SUBMINOR	0

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
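
/*
 * Note: rte_tcp_hdr.data_off holds the TCP header length in its upper four
 * bits, expressed in 32-bit words. GET_L4_HDR_LEN() therefore evaluates to
 * the header length in 4-byte units, e.g. 5 for a plain 20-byte TCP header.
 */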

#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)
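
/*
 * Illustrative expansion (not part of the driver logic): with the macros
 * above, ENA_STAT_RX_ENTRY(cnt) becomes
 *
 *	{ .name = "cnt",
 *	  .stat_offset = offsetof(struct ena_stats_rx, cnt) }
 *
 * so each statistic can later be read generically by adding stat_offset to
 * the base address of the matching per-queue stats structure.
 */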

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
/* Timeout in seconds after which a single uncompleted Tx packet should be
 * considered as missing.
 */
#define ENA_DEVARG_MISS_TXC_TO "miss_txc_to"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and add it to the name.
 */
rte_atomic64_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
	ENA_STAT_TX_ENTRY(missed_tx),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(l3_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_good),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
			RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
		       RTE_MBUF_F_TX_IP_CKSUM |\
		       RTE_MBUF_F_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define	ENA_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_L4_MASK |         \
	RTE_MBUF_F_TX_IPV6 |            \
	RTE_MBUF_F_TX_IPV4 |            \
	RTE_MBUF_F_TX_IP_CKSUM |        \
	RTE_MBUF_F_TX_TCP_SEG)

#define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
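
/*
 * Illustrative note: ENA_TX_OFFLOAD_NOTSUP_MASK is the complement of the
 * supported Tx offload flags within RTE_MBUF_F_TX_OFFLOAD_MASK. A sketch of
 * the kind of check this enables on the Tx prepare path (the actual logic
 * lives in eth_ena_prep_pkts(), declared below):
 *
 *	if (mbuf->ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) {
 *		rte_errno = ENOTSUP;
 *		return nb_prepared;
 *	}
 */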

/** HW-specific offload capabilities. */
/* IPv4 checksum offload. */
#define ENA_L3_IPV4_CSUM		0x0001
/* TCP/UDP checksum offload for IPv4 packets. */
#define ENA_L4_IPV4_CSUM		0x0002
/* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
#define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
/* TCP/UDP checksum offload for IPv6 packets. */
#define ENA_L4_IPV6_CSUM		0x0008
/* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
#define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
/* TSO support for IPv4 packets. */
#define ENA_IPV4_TSO			0x0020

/* Device supports setting RSS hash. */
#define ENA_RX_RSS_HASH			0x0040
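
/*
 * Example (derived from the flag values above): a device offering IPv4
 * header checksum, full IPv4 L4 checksum and IPv4 TSO would be described by
 * the mask (ENA_L3_IPV4_CSUM | ENA_L4_IPV4_CSUM | ENA_IPV4_TSO) == 0x0023.
 */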

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_adapter *adapter,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			      enum ena_ring_type ring_type);
static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
				      const uint64_t *ids,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned int size);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static int ena_copy_eni_stats(struct ena_adapter *adapter,
			      struct ena_stats_eni *stats);
static int ena_setup_rx_intr(struct rte_eth_dev *dev);
static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ena_configure_aenq(struct ena_adapter *adapter);
static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
				 const void *peer);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure          = ena_dev_configure,
	.dev_infos_get          = ena_infos_get,
	.rx_queue_setup         = ena_rx_queue_setup,
	.tx_queue_setup         = ena_tx_queue_setup,
	.dev_start              = ena_start,
	.dev_stop               = ena_stop,
	.link_update            = ena_link_update,
	.stats_get              = ena_stats_get,
	.xstats_get_names       = ena_xstats_get_names,
	.xstats_get_names_by_id = ena_xstats_get_names_by_id,
	.xstats_get             = ena_xstats_get,
	.xstats_get_by_id       = ena_xstats_get_by_id,
	.mtu_set                = ena_mtu_set,
	.rx_queue_release       = ena_rx_queue_release,
	.tx_queue_release       = ena_tx_queue_release,
	.dev_close              = ena_close,
	.dev_reset              = ena_dev_reset,
	.reta_update            = ena_rss_reta_update,
	.reta_query             = ena_rss_reta_query,
	.rx_queue_intr_enable   = ena_rx_queue_intr_enable,
	.rx_queue_intr_disable  = ena_rx_queue_intr_disable,
	.rss_hash_update        = ena_rss_hash_update,
	.rss_hash_conf_get      = ena_rss_hash_conf_get,
	.tx_done_cleanup        = ena_tx_cleanup,
};

/*********************************************************************
 *  Multi-Process communication bits
 *********************************************************************/
/* rte_mp IPC message name */
#define ENA_MP_NAME	"net_ena_mp"
/* Request timeout in seconds */
#define ENA_MP_REQ_TMO	5

/** Proxy request type */
enum ena_mp_req {
	ENA_MP_DEV_STATS_GET,
	ENA_MP_ENI_STATS_GET,
	ENA_MP_MTU_SET,
	ENA_MP_IND_TBL_GET,
	ENA_MP_IND_TBL_SET
};

/** Proxy message body. Shared between requests and responses. */
struct ena_mp_body {
	/* Message type */
	enum ena_mp_req type;
	int port_id;
	/* Processing result. Set in replies. 0 if message succeeded, negative
	 * error code otherwise.
	 */
	int result;
	union {
		int mtu; /* For ENA_MP_MTU_SET */
	} args;
};

/**
 * Initialize IPC message.
 *
 * @param[out] msg
 *   Pointer to the message to initialize.
 * @param[in] type
 *   Message type.
 * @param[in] port_id
 *   Port ID of target device.
 *
 */
static void
mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id)
{
	struct ena_mp_body *body = (struct ena_mp_body *)&msg->param;

	memset(msg, 0, sizeof(*msg));
	strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name));
	msg->len_param = sizeof(*body);
	body->type = type;
	body->port_id = port_id;
}

/*********************************************************************
 *  Multi-Process communication PMD API
 *********************************************************************/
/**
 * Define a proxy request descriptor
 *
 * Used to define all structures and functions required for proxying a given
 * function to the primary process, including the code to prepare the request
 * and process the response.
 *
 * @param[in] f
 *   Name of the function to proxy
 * @param[in] t
 *   Message type to use
 * @param[in] prep
 *   Body of a function to prepare the request, in the form of a statement
 *   expression. It is passed all the original function arguments along with two
 *   extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *req - body of a request to prepare.
 * @param[in] proc
 *   Body of a function to process the response, in the form of a statement
 *   expression. It is passed all the original function arguments along with two
 *   extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *rsp - body of a response to process.
 * @param ...
 *   Proxied function's arguments
 *
 * @note Inside prep and proc any parameters which aren't used should be marked
 *       as such (with ENA_TOUCH or __rte_unused).
 */
#define ENA_PROXY_DESC(f, t, prep, proc, ...)			\
	static const enum ena_mp_req mp_type_ ## f =  t;	\
	static const char *mp_name_ ## f = #t;			\
	static void mp_prep_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *req,	\
				  __VA_ARGS__)			\
	{							\
		prep;						\
	}							\
	static void mp_proc_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *rsp,	\
				  __VA_ARGS__)			\
	{							\
		proc;						\
	}
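
/*
 * Illustrative expansion: ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
 * prep, proc, struct ena_com_dev *ena_dev, int mtu) defines, among others,
 *
 *	static const enum ena_mp_req mp_type_ena_com_set_dev_mtu =
 *		ENA_MP_MTU_SET;
 *	static void mp_prep_ena_com_set_dev_mtu(struct ena_adapter *adapter,
 *						struct ena_mp_body *req,
 *						struct ena_com_dev *ena_dev,
 *						int mtu);
 *
 * which ENA_PROXY() below can reference by token-pasting the proxied
 * function name.
 */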

/**
 * Proxy wrapper for calling primary functions in a secondary process.
 *
 * Depending on whether called in a primary or secondary process, calls the
 * @p func directly or proxies the call to the primary process via rte_mp IPC.
 * This macro requires a proxy request descriptor to be defined for @p func
 * using the ENA_PROXY_DESC() macro.
 *
 * @param[in/out] a
 *   Device PMD data. Used for sending the message and sharing message results
 *   between primary and secondary.
 * @param[in] f
 *   Function to proxy.
 * @param ...
 *   Arguments of @p func.
 *
 * @return
 *   - 0: Processing succeeded and response handler was called.
 *   - -EPERM: IPC is unavailable on this platform. This means only the primary
 *             process may call the proxied function.
 *   - -EIO:   IPC returned error on request send. Inspect rte_errno for the
 *             detailed error code.
 *   - Negative error code from the proxied function.
 *
 * @note This mechanism is geared towards control-path tasks. Avoid calling it
 *       in the fast-path unless unbounded delays are allowed. This is due to
 *       the IPC mechanism itself (socket based).
 * @note Due to IPC parameter size limitations the proxy logic shares call
 *       results through the struct ena_adapter shared memory. This makes the
 *       proxy mechanism strictly single-threaded. Therefore be sure to make all
 *       calls to the same proxied function under the same lock.
 */
#define ENA_PROXY(a, f, ...)						\
({									\
	struct ena_adapter *_a = (a);					\
	struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO };		\
	struct ena_mp_body *req, *rsp;					\
	struct rte_mp_reply mp_rep;					\
	struct rte_mp_msg mp_req;					\
	int ret;							\
									\
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {		\
		ret = f(__VA_ARGS__);					\
	} else {							\
		/* Prepare and send request */				\
		req = (struct ena_mp_body *)&mp_req.param;		\
		mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \
		mp_prep_ ## f(_a, req, ## __VA_ARGS__);			\
									\
		ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);	\
		if (likely(!ret)) {					\
			RTE_ASSERT(mp_rep.nb_received == 1);		\
			rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \
			ret = rsp->result;				\
			if (ret == 0) {					\
				mp_proc_##f(_a, rsp, ## __VA_ARGS__);	\
			} else {					\
				PMD_DRV_LOG(ERR,			\
					    "%s returned error: %d\n",	\
					    mp_name_ ## f, rsp->result);\
			}						\
			free(mp_rep.msgs);				\
		} else if (rte_errno == ENOTSUP) {			\
			PMD_DRV_LOG(ERR,				\
				    "No IPC, can't proxy to primary\n");\
			ret = -rte_errno;				\
		} else {						\
			PMD_DRV_LOG(ERR, "Request %s failed: %s\n",	\
				    mp_name_ ## f,			\
				    rte_strerror(rte_errno));		\
			ret = -EIO;					\
		}							\
	}								\
	ret;								\
})
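
/*
 * Illustrative usage (a sketch mirroring the descriptors below): proxying
 * the MTU update reduces to a single call site that works from either
 * process type:
 *
 *	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, &adapter->ena_dev, mtu);
 *
 * In the primary process this calls ena_com_set_dev_mtu() directly; in a
 * secondary it sends an ENA_MP_MTU_SET request and waits up to
 * ENA_MP_REQ_TMO seconds for the reply.
 */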

/*********************************************************************
 *  Multi-Process communication request descriptors
 *********************************************************************/

ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != &adapter->basic_stats)
		rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats);

ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != (struct ena_admin_eni_stats *)&adapter->eni_stats)
		rte_memcpy(stats, &adapter->eni_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats);

ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(ena_dev);
	req->args.mtu = mtu;
}),
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(mtu);
}),
	struct ena_com_dev *ena_dev, int mtu);

ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
}),
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
}),
	struct ena_com_dev *ena_dev);

ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(ind_tbl);
}),
({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (ind_tbl != adapter->indirect_table)
		rte_memcpy(ind_tbl, adapter->indirect_table,
			   sizeof(adapter->indirect_table));
}),
	struct ena_com_dev *ena_dev, u32 *ind_tbl);

static inline void ena_trigger_reset(struct ena_adapter *adapter,
				     enum ena_regs_reset_reason_types reason)
{
	if (likely(!adapter->trigger_reset)) {
		adapter->reset_reason = reason;
		adapter->trigger_reset = true;
	}
}

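/*
 * Note: the reset request is latched: only the first caller records its
 * reason, so the reported reset reason always reflects the original failure.
 * The flag is merely marked here and serviced asynchronously (the watchdog
 * timer path in this driver), which is why the helper does not reset the
 * device inline.
 */
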
static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
				       struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx,
				       bool fill_hash)
{
	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err)) {
			++rx_stats->l3_csum_bad;
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		} else {
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		}
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
	} else {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			++rx_stats->l4_csum_bad;
			/*
			 * For the L4 Rx checksum offload the HW may indicate a
			 * bad checksum even though the checksum is valid.
			 * Because of that, the UNKNOWN flag is set here to let
			 * the app re-verify the checksum itself.
			 */
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
		} else {
			++rx_stats->l4_csum_good;
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		}
	}

	if (fill_hash &&
	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mbuf->hash.rss = ena_rx_ctx->hash;
	}

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}
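
/*
 * Illustrative outcome of ena_rx_mbuf_prepare(): a non-fragmented IPv4/TCP
 * frame that passed both checksum checks (with fill_hash enabled) ends up
 * with
 *
 *	mbuf->packet_type == (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP);
 *	mbuf->ol_flags == (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
 *			   RTE_MBUF_F_RX_L4_CKSUM_GOOD |
 *			   RTE_MBUF_F_RX_RSS_HASH);
 *
 * and mbuf->hash.rss carrying the hash computed by the device.
 */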

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			/* For IPv6 packets, DF always needs to be true. */
			ena_tx_ctx->df = 1;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = 1;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
				RTE_MBUF_F_TX_UDP_CKSUM) &&
				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}
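
/*
 * Illustrative application-side setup matched by ena_tx_mbuf_prepare() (a
 * sketch, not driver code): to use TSO plus IPv4 header checksum offload on
 * a queue configured with RTE_ETH_TX_OFFLOAD_TCP_TSO and
 * RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, a sender would prepare each mbuf roughly
 * as:
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->tso_segsz = 1448;
 *	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		       RTE_MBUF_F_TX_TCP_SEG;
 *
 * The driver then derives l3_hdr_offset/l3_hdr_len from l2_len/l3_len, mss
 * from tso_segsz, and reads the TCP header length from the packet itself
 * via GET_L4_HDR_LEN().
 */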

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf. queue %d:%d req_id %u\n",
			tx_ring->port_id, tx_ring->id, req_id);
	else
		PMD_TX_LOG(ERR, "Invalid req_id: %hu in queue %d:%d\n",
			req_id, tx_ring->port_id, tx_ring->id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
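
	/*
	 * Illustrative packing: with the 2.6.0 version defined at the top of
	 * this file, driver_version becomes
	 * 2 | (6 << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
	 * (0 << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT), i.e. each version
	 * component occupies its own bit field of the 32-bit word.
	 */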
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
{
	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}
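
/*
 * Worked example (derived from the string tables above): with 4 global, 5
 * ENI, 8 per-Tx-queue and 9 per-Rx-queue statistics, a port configured with
 * 2 Tx and 2 Rx queues exposes 4 + 5 + 2 * 8 + 2 * 9 = 43 xstats.
 */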

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->edev_data);

	/* Allocate 32 bytes for each string and 64 bits for each value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     dev);

	/*
	 * MAC is not allocated dynamically. Setting it to NULL prevents the
	 * resource from being released in rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	/* Cannot release memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
		return -EPERM;
	}

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(dev, i);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(dev, i);
}

static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->rx_queues[qid];

	/* Free ring resources */
	rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->tx_queues[qid];

	/* Free ring resources */
	rte_free(ring->push_buf_intermediate_buf);

	rte_free(ring->tx_buffer_info);

	rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf) {
			rte_pktmbuf_free(tx_buf->mbuf);
			tx_buf->mbuf = NULL;
		}
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of Rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of Tx queues\n");
			}

			rc = ena_queue_start(dev, &queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					"Failed to start queue[%d] of type(%d)\n",
					i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t mtu = adapter->edev_data->mtu;

	if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR,
			"Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
			mtu, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);
10521173fca2SJan Medala 
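	/* Large LLQ headers need 256B descriptor list entries instead of
	 * 128B ones. The device-side LLQ memory stays the same size, so the
	 * maximum Tx queue depth is halved to compensate.
	 */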
10538a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr) {
10548a7a73f2SMichal Krawczyk 		if ((llq->entry_size_ctrl_supported &
10558a7a73f2SMichal Krawczyk 		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
10568a7a73f2SMichal Krawczyk 		    (ena_dev->tx_mem_queue_type ==
10578a7a73f2SMichal Krawczyk 		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
10588a7a73f2SMichal Krawczyk 			max_tx_queue_size /= 2;
10598a7a73f2SMichal Krawczyk 			PMD_INIT_LOG(INFO,
1060617898d1SMichal Krawczyk 				"Forcing large headers and decreasing maximum Tx queue size to %d\n",
10618a7a73f2SMichal Krawczyk 				max_tx_queue_size);
10628a7a73f2SMichal Krawczyk 		} else {
10638a7a73f2SMichal Krawczyk 			PMD_INIT_LOG(ERR,
10648a7a73f2SMichal Krawczyk 				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
10658a7a73f2SMichal Krawczyk 		}
10668a7a73f2SMichal Krawczyk 	}
10678a7a73f2SMichal Krawczyk 
10685920d930SMichal Krawczyk 	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
1069617898d1SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid queue size\n");
10701173fca2SJan Medala 		return -EFAULT;
10711173fca2SJan Medala 	}
10721173fca2SJan Medala 
10735920d930SMichal Krawczyk 	ctx->max_tx_queue_size = max_tx_queue_size;
10745920d930SMichal Krawczyk 	ctx->max_rx_queue_size = max_rx_queue_size;
10752061fe41SRafal Kozik 
1076ea93d37eSRafal Kozik 	return 0;
10771173fca2SJan Medala }
10781173fca2SJan Medala 
10791173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev)
10801173fca2SJan Medala {
1081890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
10821173fca2SJan Medala 
10831173fca2SJan Medala 	rte_atomic64_init(&adapter->drv_stats->ierrors);
10841173fca2SJan Medala 	rte_atomic64_init(&adapter->drv_stats->oerrors);
10851173fca2SJan Medala 	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
1086e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = 0;
10871173fca2SJan Medala }
10881173fca2SJan Medala 
1089d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev,
10901173fca2SJan Medala 			  struct rte_eth_stats *stats)
10911173fca2SJan Medala {
10921173fca2SJan Medala 	struct ena_admin_basic_stats ena_stats;
1093890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
10941173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
10951173fca2SJan Medala 	int rc;
109645b6d861SMichal Krawczyk 	int i;
109745b6d861SMichal Krawczyk 	int max_rings_stats;
10981173fca2SJan Medala 
10991173fca2SJan Medala 	memset(&ena_stats, 0, sizeof(ena_stats));
11001343c415SMichal Krawczyk 
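	/* ENA_PROXY executes the admin call directly in the primary process
	 * and forwards it over the multi-process channel when called from a
	 * secondary process.
	 */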
11011343c415SMichal Krawczyk 	rte_spinlock_lock(&adapter->admin_lock);
1102e3595539SStanislaw Kardach 	rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev,
1103e3595539SStanislaw Kardach 		       &ena_stats);
11041343c415SMichal Krawczyk 	rte_spinlock_unlock(&adapter->admin_lock);
11051173fca2SJan Medala 	if (unlikely(rc)) {
11066f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
1107d5b0924bSMatan Azrad 		return rc;
11081173fca2SJan Medala 	}
11091173fca2SJan Medala 
11101173fca2SJan Medala 	/* Set of basic statistics from ENA */
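	/* Each counter arrives as two 32-bit halves merged into 64 bits. */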
11111173fca2SJan Medala 	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
11121173fca2SJan Medala 					  ena_stats.rx_pkts_low);
11131173fca2SJan Medala 	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
11141173fca2SJan Medala 					  ena_stats.tx_pkts_low);
11151173fca2SJan Medala 	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
11161173fca2SJan Medala 					ena_stats.rx_bytes_low);
11171173fca2SJan Medala 	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
11181173fca2SJan Medala 					ena_stats.tx_bytes_low);
11191173fca2SJan Medala 
11201173fca2SJan Medala 	/* Driver related stats */
1121e1e73e32SMichal Krawczyk 	stats->imissed = adapter->drv_stats->rx_drops;
11221173fca2SJan Medala 	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
11231173fca2SJan Medala 	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
11241173fca2SJan Medala 	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
112545b6d861SMichal Krawczyk 
112645b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
112745b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
112845b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
112945b6d861SMichal Krawczyk 		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;
113045b6d861SMichal Krawczyk 
113145b6d861SMichal Krawczyk 		stats->q_ibytes[i] = rx_stats->bytes;
113245b6d861SMichal Krawczyk 		stats->q_ipackets[i] = rx_stats->cnt;
113345b6d861SMichal Krawczyk 		stats->q_errors[i] = rx_stats->bad_desc_num +
113445b6d861SMichal Krawczyk 			rx_stats->bad_req_id;
113545b6d861SMichal Krawczyk 	}
113645b6d861SMichal Krawczyk 
113745b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
113845b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
113945b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
114045b6d861SMichal Krawczyk 		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;
114145b6d861SMichal Krawczyk 
114245b6d861SMichal Krawczyk 		stats->q_obytes[i] = tx_stats->bytes;
114345b6d861SMichal Krawczyk 		stats->q_opackets[i] = tx_stats->cnt;
114445b6d861SMichal Krawczyk 	}
114545b6d861SMichal Krawczyk 
1146d5b0924bSMatan Azrad 	return 0;
11471173fca2SJan Medala }
11481173fca2SJan Medala 
11491173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11501173fca2SJan Medala {
11511173fca2SJan Medala 	struct ena_adapter *adapter;
11521173fca2SJan Medala 	struct ena_com_dev *ena_dev;
11531173fca2SJan Medala 	int rc = 0;
11541173fca2SJan Medala 
1155498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
1156498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
1157890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
11581173fca2SJan Medala 
11591173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
1160498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
11611173fca2SJan Medala 
11621bb4a528SFerruh Yigit 	if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
11636f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1164617898d1SMichal Krawczyk 			"Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n",
11651bb4a528SFerruh Yigit 			mtu, adapter->max_mtu, ENA_MIN_MTU);
1166241da076SRafal Kozik 		return -EINVAL;
11671173fca2SJan Medala 	}
11681173fca2SJan Medala 
1169e3595539SStanislaw Kardach 	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
11701173fca2SJan Medala 	if (rc)
11716f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
11721173fca2SJan Medala 	else
1173617898d1SMichal Krawczyk 		PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu);
11741173fca2SJan Medala 
11751173fca2SJan Medala 	return rc;
11761173fca2SJan Medala }
11771173fca2SJan Medala 
11781173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev)
11791173fca2SJan Medala {
1180890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1181d9b8b106SMichal Krawczyk 	uint64_t ticks;
11821173fca2SJan Medala 	int rc = 0;
11831173fca2SJan Medala 
118439ecdd3dSStanislaw Kardach 	/* Cannot allocate memory in secondary process */
118539ecdd3dSStanislaw Kardach 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
118639ecdd3dSStanislaw Kardach 		PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n");
118739ecdd3dSStanislaw Kardach 		return -EPERM;
118839ecdd3dSStanislaw Kardach 	}
118939ecdd3dSStanislaw Kardach 
11901173fca2SJan Medala 	rc = ena_check_valid_conf(adapter);
11911173fca2SJan Medala 	if (rc)
11921173fca2SJan Medala 		return rc;
11931173fca2SJan Medala 
11946986cdc4SMichal Krawczyk 	rc = ena_setup_rx_intr(dev);
11956986cdc4SMichal Krawczyk 	if (rc)
11966986cdc4SMichal Krawczyk 		return rc;
11976986cdc4SMichal Krawczyk 
119826e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
11991173fca2SJan Medala 	if (rc)
12001173fca2SJan Medala 		return rc;
12011173fca2SJan Medala 
120226e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
12031173fca2SJan Medala 	if (rc)
120426e5543dSRafal Kozik 		goto err_start_tx;
12051173fca2SJan Medala 
1206295968d1SFerruh Yigit 	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
120734d5e97eSMichal Krawczyk 		rc = ena_rss_configure(adapter);
12081173fca2SJan Medala 		if (rc)
120926e5543dSRafal Kozik 			goto err_rss_init;
12101173fca2SJan Medala 	}
12111173fca2SJan Medala 
12121173fca2SJan Medala 	ena_stats_restart(dev);
12131173fca2SJan Medala 
1214d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
1215d9b8b106SMichal Krawczyk 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1216d9b8b106SMichal Krawczyk 
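	/* Arm the watchdog to fire once per second. The callback checks for
	 * missed keep-alive events, a hung admin queue and missing Tx
	 * completions.
	 */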
1217d9b8b106SMichal Krawczyk 	ticks = rte_get_timer_hz();
1218d9b8b106SMichal Krawczyk 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1219aab58857SStanislaw Kardach 			ena_timer_wd_callback, dev);
1220d9b8b106SMichal Krawczyk 
12217830e905SSolganik Alexander 	++adapter->dev_stats.dev_start;
12221173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
12231173fca2SJan Medala 
12241173fca2SJan Medala 	return 0;
122526e5543dSRafal Kozik 
122626e5543dSRafal Kozik err_rss_init:
122726e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
122826e5543dSRafal Kozik err_start_tx:
122926e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
123026e5543dSRafal Kozik 	return rc;
12311173fca2SJan Medala }
12321173fca2SJan Medala 
123362024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev)
1234eb0ef49dSMichal Krawczyk {
1235890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1236e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
12376986cdc4SMichal Krawczyk 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1238d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1239e457bc70SRafal Kozik 	int rc;
1240eb0ef49dSMichal Krawczyk 
124139ecdd3dSStanislaw Kardach 	/* Cannot free memory in secondary process */
124239ecdd3dSStanislaw Kardach 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
124339ecdd3dSStanislaw Kardach 		PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n");
124439ecdd3dSStanislaw Kardach 		return -EPERM;
124539ecdd3dSStanislaw Kardach 	}
124639ecdd3dSStanislaw Kardach 
1247d9b8b106SMichal Krawczyk 	rte_timer_stop_sync(&adapter->timer_wd);
124826e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
124926e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1250d9b8b106SMichal Krawczyk 
1251e457bc70SRafal Kozik 	if (adapter->trigger_reset) {
1252e457bc70SRafal Kozik 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1253e457bc70SRafal Kozik 		if (rc)
1254617898d1SMichal Krawczyk 			PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
1255e457bc70SRafal Kozik 	}
1256e457bc70SRafal Kozik 
12576986cdc4SMichal Krawczyk 	rte_intr_disable(intr_handle);
12586986cdc4SMichal Krawczyk 
12596986cdc4SMichal Krawczyk 	rte_intr_efd_disable(intr_handle);
1260d61138d4SHarman Kalra 
1261d61138d4SHarman Kalra 	/* Cleanup vector list */
1262d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
12636986cdc4SMichal Krawczyk 
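	/* Re-enable the base device interrupt so that admin queue completions
	 * and AENQ notifications can still be delivered.
	 */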
12646986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
12656986cdc4SMichal Krawczyk 
12667830e905SSolganik Alexander 	++adapter->dev_stats.dev_stop;
1267eb0ef49dSMichal Krawczyk 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1268b8f5d2aeSThomas Monjalon 	dev->data->dev_started = 0;
126962024eb8SIvan Ilchenko 
127062024eb8SIvan Ilchenko 	return 0;
1271eb0ef49dSMichal Krawczyk }
1272eb0ef49dSMichal Krawczyk 
12736986cdc4SMichal Krawczyk static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
1274df238f84SMichal Krawczyk {
12756986cdc4SMichal Krawczyk 	struct ena_adapter *adapter = ring->adapter;
12766986cdc4SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
12776986cdc4SMichal Krawczyk 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1278d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1279df238f84SMichal Krawczyk 	struct ena_com_create_io_ctx ctx =
1280df238f84SMichal Krawczyk 		/* policy set to _HOST just to satisfy icc compiler */
1281df238f84SMichal Krawczyk 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1282df238f84SMichal Krawczyk 		  0, 0, 0, 0, 0 };
1283df238f84SMichal Krawczyk 	uint16_t ena_qid;
1284778677dcSRafal Kozik 	unsigned int i;
1285df238f84SMichal Krawczyk 	int rc;
1286df238f84SMichal Krawczyk 
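	/* Assume no MSI-X vector by default; Rx queues get one assigned below
	 * if Rx interrupts are enabled.
	 */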
12876986cdc4SMichal Krawczyk 	ctx.msix_vector = -1;
1288df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX) {
1289df238f84SMichal Krawczyk 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1290df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1291df238f84SMichal Krawczyk 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1292778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1293778677dcSRafal Kozik 			ring->empty_tx_reqs[i] = i;
1294df238f84SMichal Krawczyk 	} else {
1295df238f84SMichal Krawczyk 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1296df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
12976986cdc4SMichal Krawczyk 		if (rte_intr_dp_is_en(intr_handle))
1298d61138d4SHarman Kalra 			ctx.msix_vector =
1299d61138d4SHarman Kalra 				rte_intr_vec_list_index_get(intr_handle,
1300d61138d4SHarman Kalra 								   ring->id);
1301d61138d4SHarman Kalra 
1302778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1303778677dcSRafal Kozik 			ring->empty_rx_reqs[i] = i;
1304df238f84SMichal Krawczyk 	}
1305badc3a6aSMichal Krawczyk 	ctx.queue_size = ring->ring_size;
1306df238f84SMichal Krawczyk 	ctx.qid = ena_qid;
13074217cb0bSMichal Krawczyk 	ctx.numa_node = ring->numa_socket_id;
1308df238f84SMichal Krawczyk 
1309df238f84SMichal Krawczyk 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1310df238f84SMichal Krawczyk 	if (rc) {
13116f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1312617898d1SMichal Krawczyk 			"Failed to create IO queue[%d] (qid:%d), rc: %d\n",
1313df238f84SMichal Krawczyk 			ring->id, ena_qid, rc);
1314df238f84SMichal Krawczyk 		return rc;
1315df238f84SMichal Krawczyk 	}
1316df238f84SMichal Krawczyk 
1317df238f84SMichal Krawczyk 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1318df238f84SMichal Krawczyk 				     &ring->ena_com_io_sq,
1319df238f84SMichal Krawczyk 				     &ring->ena_com_io_cq);
1320df238f84SMichal Krawczyk 	if (rc) {
13216f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1322617898d1SMichal Krawczyk 			"Failed to get IO queue[%d] handlers, rc: %d\n",
1323df238f84SMichal Krawczyk 			ring->id, rc);
1324df238f84SMichal Krawczyk 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1325df238f84SMichal Krawczyk 		return rc;
1326df238f84SMichal Krawczyk 	}
1327df238f84SMichal Krawczyk 
1328df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX)
1329df238f84SMichal Krawczyk 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1330df238f84SMichal Krawczyk 
13316986cdc4SMichal Krawczyk 	/* Start with Rx interrupts being masked. */
13326986cdc4SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle))
13336986cdc4SMichal Krawczyk 		ena_rx_queue_intr_disable(dev, ring->id);
13346986cdc4SMichal Krawczyk 
1335df238f84SMichal Krawczyk 	return 0;
1336df238f84SMichal Krawczyk }
1337df238f84SMichal Krawczyk 
133826e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring)
1339df238f84SMichal Krawczyk {
134026e5543dSRafal Kozik 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1341df238f84SMichal Krawczyk 
134226e5543dSRafal Kozik 	if (ring->type == ENA_RING_TYPE_RX) {
134326e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
134426e5543dSRafal Kozik 		ena_rx_queue_release_bufs(ring);
134526e5543dSRafal Kozik 	} else {
134626e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
134726e5543dSRafal Kozik 		ena_tx_queue_release_bufs(ring);
1348df238f84SMichal Krawczyk 	}
1349df238f84SMichal Krawczyk }
1350df238f84SMichal Krawczyk 
135126e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev,
135226e5543dSRafal Kozik 			      enum ena_ring_type ring_type)
135326e5543dSRafal Kozik {
1354890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
135526e5543dSRafal Kozik 	struct ena_ring *queues = NULL;
135626e5543dSRafal Kozik 	uint16_t nb_queues, i;
135726e5543dSRafal Kozik 
135826e5543dSRafal Kozik 	if (ring_type == ENA_RING_TYPE_RX) {
135926e5543dSRafal Kozik 		queues = adapter->rx_ring;
136026e5543dSRafal Kozik 		nb_queues = dev->data->nb_rx_queues;
136126e5543dSRafal Kozik 	} else {
136226e5543dSRafal Kozik 		queues = adapter->tx_ring;
136326e5543dSRafal Kozik 		nb_queues = dev->data->nb_tx_queues;
136426e5543dSRafal Kozik 	}
136526e5543dSRafal Kozik 
136626e5543dSRafal Kozik 	for (i = 0; i < nb_queues; ++i)
136726e5543dSRafal Kozik 		if (queues[i].configured)
136826e5543dSRafal Kozik 			ena_queue_stop(&queues[i]);
136926e5543dSRafal Kozik }
137026e5543dSRafal Kozik 
13716986cdc4SMichal Krawczyk static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
13721173fca2SJan Medala {
1373a467e8f3SMichal Krawczyk 	int rc, bufs_num;
13741173fca2SJan Medala 
13751173fca2SJan Medala 	ena_assert_msg(ring->configured == 1,
137626e5543dSRafal Kozik 		       "Trying to start unconfigured queue\n");
13771173fca2SJan Medala 
13786986cdc4SMichal Krawczyk 	rc = ena_create_io_queue(dev, ring);
1379df238f84SMichal Krawczyk 	if (rc) {
1380617898d1SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Failed to create IO queue\n");
1381df238f84SMichal Krawczyk 		return rc;
1382df238f84SMichal Krawczyk 	}
1383df238f84SMichal Krawczyk 
13841173fca2SJan Medala 	ring->next_to_clean = 0;
13851173fca2SJan Medala 	ring->next_to_use = 0;
13861173fca2SJan Medala 
13877830e905SSolganik Alexander 	if (ring->type == ENA_RING_TYPE_TX) {
13887830e905SSolganik Alexander 		ring->tx_stats.available_desc =
1389b2b02edeSMichal Krawczyk 			ena_com_free_q_entries(ring->ena_com_io_sq);
13901173fca2SJan Medala 		return 0;
13917830e905SSolganik Alexander 	}
13921173fca2SJan Medala 
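	/* Fill the ring up to ring_size - 1 entries only: one descriptor is
	 * kept unused so a completely full ring can be told apart from an
	 * empty one.
	 */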
1393a467e8f3SMichal Krawczyk 	bufs_num = ring->ring_size - 1;
1394a467e8f3SMichal Krawczyk 	rc = ena_populate_rx_queue(ring, bufs_num);
1395a467e8f3SMichal Krawczyk 	if (rc != bufs_num) {
139626e5543dSRafal Kozik 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
139726e5543dSRafal Kozik 					 ENA_IO_RXQ_IDX(ring->id));
1398617898d1SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n");
1399241da076SRafal Kozik 		return ENA_COM_FAULT;
14001173fca2SJan Medala 	}
14024387e81cSIdo Segev 	/* Flush the per-core Rx buffer pool caches, as the buffers can be
14034387e81cSIdo Segev 	 * used on other cores as well.
14044387e81cSIdo Segev 	 */
14044387e81cSIdo Segev 	rte_mempool_cache_flush(NULL, ring->mb_pool);
14051173fca2SJan Medala 
14061173fca2SJan Medala 	return 0;
14071173fca2SJan Medala }
14081173fca2SJan Medala 
14091173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev,
14101173fca2SJan Medala 			      uint16_t queue_idx,
14111173fca2SJan Medala 			      uint16_t nb_desc,
14124217cb0bSMichal Krawczyk 			      unsigned int socket_id,
141356b8b9b7SRafal Kozik 			      const struct rte_eth_txconf *tx_conf)
14141173fca2SJan Medala {
14151173fca2SJan Medala 	struct ena_ring *txq = NULL;
1416890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
14171173fca2SJan Medala 	unsigned int i;
1418005064e5SMichal Krawczyk 	uint16_t dyn_thresh;
14191173fca2SJan Medala 
14201173fca2SJan Medala 	txq = &adapter->tx_ring[queue_idx];
14211173fca2SJan Medala 
14221173fca2SJan Medala 	if (txq->configured) {
14236f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
1424617898d1SMichal Krawczyk 			"API violation. Queue[%d] is already configured\n",
14251173fca2SJan Medala 			queue_idx);
1426241da076SRafal Kozik 		return ENA_COM_FAULT;
14271173fca2SJan Medala 	}
14281173fca2SJan Medala 
14291daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
14306f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1431617898d1SMichal Krawczyk 			"Unsupported size of Tx queue: %d is not a power of 2.\n",
14321daff526SJakub Palider 			nb_desc);
14331daff526SJakub Palider 		return -EINVAL;
14341daff526SJakub Palider 	}
14351daff526SJakub Palider 
14365920d930SMichal Krawczyk 	if (nb_desc > adapter->max_tx_ring_size) {
14376f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1438617898d1SMichal Krawczyk 			"Unsupported size of Tx queue (max size: %d)\n",
14395920d930SMichal Krawczyk 			adapter->max_tx_ring_size);
14401173fca2SJan Medala 		return -EINVAL;
14411173fca2SJan Medala 	}
14421173fca2SJan Medala 
14431173fca2SJan Medala 	txq->port_id = dev->data->port_id;
14441173fca2SJan Medala 	txq->next_to_clean = 0;
14451173fca2SJan Medala 	txq->next_to_use = 0;
14461173fca2SJan Medala 	txq->ring_size = nb_desc;
1447c0006061SMichal Krawczyk 	txq->size_mask = nb_desc - 1;
14484217cb0bSMichal Krawczyk 	txq->numa_socket_id = socket_id;
14491d973d8fSIgor Chauskin 	txq->pkts_without_db = false;
1450f93e20e5SMichal Krawczyk 	txq->last_cleanup_ticks = 0;
14511173fca2SJan Medala 
145208180833SMichal Krawczyk 	txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
145308180833SMichal Krawczyk 		sizeof(struct ena_tx_buffer) * txq->ring_size,
145408180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
145508180833SMichal Krawczyk 		socket_id);
14561173fca2SJan Medala 	if (!txq->tx_buffer_info) {
1457617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1458617898d1SMichal Krawczyk 			"Failed to allocate memory for Tx buffer info\n");
1459df238f84SMichal Krawczyk 		return -ENOMEM;
14601173fca2SJan Medala 	}
14611173fca2SJan Medala 
146208180833SMichal Krawczyk 	txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
146308180833SMichal Krawczyk 		sizeof(uint16_t) * txq->ring_size,
146408180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
146508180833SMichal Krawczyk 		socket_id);
14661173fca2SJan Medala 	if (!txq->empty_tx_reqs) {
1467617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1468617898d1SMichal Krawczyk 			"Failed to allocate memory for empty Tx requests\n");
1469df238f84SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
1470df238f84SMichal Krawczyk 		return -ENOMEM;
14711173fca2SJan Medala 	}
1472241da076SRafal Kozik 
14732fca2a98SMichal Krawczyk 	txq->push_buf_intermediate_buf =
147408180833SMichal Krawczyk 		rte_zmalloc_socket("txq->push_buf_intermediate_buf",
14752fca2a98SMichal Krawczyk 			txq->tx_max_header_size,
147608180833SMichal Krawczyk 			RTE_CACHE_LINE_SIZE,
147708180833SMichal Krawczyk 			socket_id);
14782fca2a98SMichal Krawczyk 	if (!txq->push_buf_intermediate_buf) {
1479617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
14802fca2a98SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
14812fca2a98SMichal Krawczyk 		rte_free(txq->empty_tx_reqs);
14822fca2a98SMichal Krawczyk 		return -ENOMEM;
14832fca2a98SMichal Krawczyk 	}
14842fca2a98SMichal Krawczyk 
14851173fca2SJan Medala 	for (i = 0; i < txq->ring_size; i++)
14861173fca2SJan Medala 		txq->empty_tx_reqs[i] = i;
14871173fca2SJan Medala 
1488005064e5SMichal Krawczyk 	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1489005064e5SMichal Krawczyk 
1490005064e5SMichal Krawczyk 	/* Check if caller provided the Tx cleanup threshold value. */
1491005064e5SMichal Krawczyk 	if (tx_conf->tx_free_thresh != 0) {
1492005064e5SMichal Krawczyk 		txq->tx_free_thresh = tx_conf->tx_free_thresh;
1493005064e5SMichal Krawczyk 	} else {
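		/* Default threshold: Tx cleanup runs once fewer than
		 * tx_free_thresh descriptors are free, i.e. once more than
		 * min(ring_size / ENA_REFILL_THRESH_DIVIDER,
		 * ENA_REFILL_THRESH_PACKET) descriptors are in use.
		 */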
1494005064e5SMichal Krawczyk 		dyn_thresh = txq->ring_size -
1495005064e5SMichal Krawczyk 			txq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1496005064e5SMichal Krawczyk 		txq->tx_free_thresh = RTE_MAX(dyn_thresh,
1497005064e5SMichal Krawczyk 			txq->ring_size - ENA_REFILL_THRESH_PACKET);
14982081d5e2SMichal Krawczyk 	}
1499005064e5SMichal Krawczyk 
1500f93e20e5SMichal Krawczyk 	txq->missing_tx_completion_threshold =
1501f93e20e5SMichal Krawczyk 		RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);
1502f93e20e5SMichal Krawczyk 
15031173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
15041173fca2SJan Medala 	txq->configured = 1;
15051173fca2SJan Medala 	dev->data->tx_queues[queue_idx] = txq;
1506241da076SRafal Kozik 
1507241da076SRafal Kozik 	return 0;
15081173fca2SJan Medala }
15091173fca2SJan Medala 
15101173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev,
15111173fca2SJan Medala 			      uint16_t queue_idx,
15121173fca2SJan Medala 			      uint16_t nb_desc,
15134217cb0bSMichal Krawczyk 			      unsigned int socket_id,
151434d5e97eSMichal Krawczyk 			      const struct rte_eth_rxconf *rx_conf,
15151173fca2SJan Medala 			      struct rte_mempool *mp)
15161173fca2SJan Medala {
1517890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
15181173fca2SJan Medala 	struct ena_ring *rxq = NULL;
151938364c26SMichal Krawczyk 	size_t buffer_size;
1520df238f84SMichal Krawczyk 	int i;
1521005064e5SMichal Krawczyk 	uint16_t dyn_thresh;
15221173fca2SJan Medala 
15231173fca2SJan Medala 	rxq = &adapter->rx_ring[queue_idx];
15241173fca2SJan Medala 	if (rxq->configured) {
15256f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
1526617898d1SMichal Krawczyk 			"API violation. Queue[%d] is already configured\n",
15271173fca2SJan Medala 			queue_idx);
1528241da076SRafal Kozik 		return ENA_COM_FAULT;
15291173fca2SJan Medala 	}
15301173fca2SJan Medala 
15311daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
15326f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1533617898d1SMichal Krawczyk 			"Unsupported size of Rx queue: %d is not a power of 2.\n",
15341daff526SJakub Palider 			nb_desc);
15351daff526SJakub Palider 		return -EINVAL;
15361daff526SJakub Palider 	}
15371daff526SJakub Palider 
15385920d930SMichal Krawczyk 	if (nb_desc > adapter->max_rx_ring_size) {
15396f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1540617898d1SMichal Krawczyk 			"Unsupported size of Rx queue (max size: %d)\n",
15415920d930SMichal Krawczyk 			adapter->max_rx_ring_size);
15421173fca2SJan Medala 		return -EINVAL;
15431173fca2SJan Medala 	}
15441173fca2SJan Medala 
154638364c26SMichal Krawczyk 	/* ENA doesn't support buffers smaller than 1400 bytes */
154638364c26SMichal Krawczyk 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
154738364c26SMichal Krawczyk 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
154838364c26SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1549617898d1SMichal Krawczyk 			"Unsupported size of Rx buffer: %zu (min size: %d)\n",
155038364c26SMichal Krawczyk 			buffer_size, ENA_RX_BUF_MIN_SIZE);
155138364c26SMichal Krawczyk 		return -EINVAL;
155238364c26SMichal Krawczyk 	}
155338364c26SMichal Krawczyk 
15541173fca2SJan Medala 	rxq->port_id = dev->data->port_id;
15551173fca2SJan Medala 	rxq->next_to_clean = 0;
15561173fca2SJan Medala 	rxq->next_to_use = 0;
15571173fca2SJan Medala 	rxq->ring_size = nb_desc;
1558c0006061SMichal Krawczyk 	rxq->size_mask = nb_desc - 1;
15594217cb0bSMichal Krawczyk 	rxq->numa_socket_id = socket_id;
15601173fca2SJan Medala 	rxq->mb_pool = mp;
15611173fca2SJan Medala 
156208180833SMichal Krawczyk 	rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
15631be097dcSMichal Krawczyk 		sizeof(struct ena_rx_buffer) * nb_desc,
156408180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
156508180833SMichal Krawczyk 		socket_id);
15661173fca2SJan Medala 	if (!rxq->rx_buffer_info) {
1567617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1568617898d1SMichal Krawczyk 			"Failed to allocate memory for Rx buffer info\n");
15691173fca2SJan Medala 		return -ENOMEM;
15701173fca2SJan Medala 	}
15711173fca2SJan Medala 
157208180833SMichal Krawczyk 	rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
157379405ee1SRafal Kozik 		sizeof(struct rte_mbuf *) * nb_desc,
157408180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
157508180833SMichal Krawczyk 		socket_id);
157679405ee1SRafal Kozik 	if (!rxq->rx_refill_buffer) {
1577617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1578617898d1SMichal Krawczyk 			"Failed to allocate memory for Rx refill buffer\n");
157979405ee1SRafal Kozik 		rte_free(rxq->rx_buffer_info);
158079405ee1SRafal Kozik 		rxq->rx_buffer_info = NULL;
158179405ee1SRafal Kozik 		return -ENOMEM;
158279405ee1SRafal Kozik 	}
158379405ee1SRafal Kozik 
158408180833SMichal Krawczyk 	rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
1585c2034976SMichal Krawczyk 		sizeof(uint16_t) * nb_desc,
158608180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
158708180833SMichal Krawczyk 		socket_id);
1588c2034976SMichal Krawczyk 	if (!rxq->empty_rx_reqs) {
1589617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1590617898d1SMichal Krawczyk 			"Failed to allocate memory for empty Rx requests\n");
1591c2034976SMichal Krawczyk 		rte_free(rxq->rx_buffer_info);
1592c2034976SMichal Krawczyk 		rxq->rx_buffer_info = NULL;
159379405ee1SRafal Kozik 		rte_free(rxq->rx_refill_buffer);
159479405ee1SRafal Kozik 		rxq->rx_refill_buffer = NULL;
1595c2034976SMichal Krawczyk 		return -ENOMEM;
1596c2034976SMichal Krawczyk 	}
1597c2034976SMichal Krawczyk 
1598c2034976SMichal Krawczyk 	for (i = 0; i < nb_desc; i++)
1599eccbe2ffSRafal Kozik 		rxq->empty_rx_reqs[i] = i;
1600c2034976SMichal Krawczyk 
160134d5e97eSMichal Krawczyk 	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
160234d5e97eSMichal Krawczyk 
1603005064e5SMichal Krawczyk 	if (rx_conf->rx_free_thresh != 0) {
1604005064e5SMichal Krawczyk 		rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1605005064e5SMichal Krawczyk 	} else {
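		/* Default threshold: the ring is refilled once more than
		 * min(ring_size / ENA_REFILL_THRESH_DIVIDER,
		 * ENA_REFILL_THRESH_PACKET) descriptors are free.
		 */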
1606005064e5SMichal Krawczyk 		dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1607005064e5SMichal Krawczyk 		rxq->rx_free_thresh = RTE_MIN(dyn_thresh,
1608005064e5SMichal Krawczyk 			(uint16_t)(ENA_REFILL_THRESH_PACKET));
1609005064e5SMichal Krawczyk 	}
1610005064e5SMichal Krawczyk 
16111173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
16121173fca2SJan Medala 	rxq->configured = 1;
16131173fca2SJan Medala 	dev->data->rx_queues[queue_idx] = rxq;
16141173fca2SJan Medala 
1615df238f84SMichal Krawczyk 	return 0;
16161173fca2SJan Medala }
16171173fca2SJan Medala 
161883fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
161983fd97b2SMichal Krawczyk 				  struct rte_mbuf *mbuf, uint16_t id)
162083fd97b2SMichal Krawczyk {
162183fd97b2SMichal Krawczyk 	struct ena_com_buf ebuf;
162283fd97b2SMichal Krawczyk 	int rc;
162383fd97b2SMichal Krawczyk 
162483fd97b2SMichal Krawczyk 	/* prepare physical address for DMA transaction */
162583fd97b2SMichal Krawczyk 	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
162683fd97b2SMichal Krawczyk 	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
162783fd97b2SMichal Krawczyk 
162883fd97b2SMichal Krawczyk 	/* pass resource to device */
162983fd97b2SMichal Krawczyk 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
163083fd97b2SMichal Krawczyk 	if (unlikely(rc != 0))
16310a001d69SMichal Krawczyk 		PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");
163283fd97b2SMichal Krawczyk 
163383fd97b2SMichal Krawczyk 	return rc;
163483fd97b2SMichal Krawczyk }
163583fd97b2SMichal Krawczyk 
16361173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
16371173fca2SJan Medala {
16381173fca2SJan Medala 	unsigned int i;
16391173fca2SJan Medala 	int rc;
16401daff526SJakub Palider 	uint16_t next_to_use = rxq->next_to_use;
16410a001d69SMichal Krawczyk 	uint16_t req_id;
16420a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
16430a001d69SMichal Krawczyk 	uint16_t in_use;
16440a001d69SMichal Krawczyk #endif
164579405ee1SRafal Kozik 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
16461173fca2SJan Medala 
16471173fca2SJan Medala 	if (unlikely(!count))
16481173fca2SJan Medala 		return 0;
16491173fca2SJan Medala 
16500a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
1651c0006061SMichal Krawczyk 	in_use = rxq->ring_size - 1 -
1652c0006061SMichal Krawczyk 		ena_com_free_q_entries(rxq->ena_com_io_sq);
16530a001d69SMichal Krawczyk 	if (unlikely((in_use + count) >= rxq->ring_size))
16540a001d69SMichal Krawczyk 		PMD_RX_LOG(ERR, "Bad Rx ring state\n");
16550a001d69SMichal Krawczyk #endif
16561173fca2SJan Medala 
16571173fca2SJan Medala 	/* get resources for incoming packets */
16583c8bc29fSDavid Harton 	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
16591173fca2SJan Medala 	if (unlikely(rc < 0)) {
16601173fca2SJan Medala 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
16617830e905SSolganik Alexander 		++rxq->rx_stats.mbuf_alloc_fail;
1662617898d1SMichal Krawczyk 		PMD_RX_LOG(DEBUG, "There are not enough free buffers\n");
16631173fca2SJan Medala 		return 0;
16641173fca2SJan Medala 	}
16651173fca2SJan Medala 
16661173fca2SJan Medala 	for (i = 0; i < count; i++) {
166779405ee1SRafal Kozik 		struct rte_mbuf *mbuf = mbufs[i];
16681be097dcSMichal Krawczyk 		struct ena_rx_buffer *rx_info;
16691173fca2SJan Medala 
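		/* Prefetch the mbuf a few iterations ahead so it is already
		 * cached once it gets initialized.
		 */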
167079405ee1SRafal Kozik 		if (likely((i + 4) < count))
167179405ee1SRafal Kozik 			rte_prefetch0(mbufs[i + 4]);
1672c2034976SMichal Krawczyk 
1673c0006061SMichal Krawczyk 		req_id = rxq->empty_rx_reqs[next_to_use];
16741be097dcSMichal Krawczyk 		rx_info = &rxq->rx_buffer_info[req_id];
1675241da076SRafal Kozik 
167683fd97b2SMichal Krawczyk 		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
167783fd97b2SMichal Krawczyk 		if (unlikely(rc != 0))
16781173fca2SJan Medala 			break;
167983fd97b2SMichal Krawczyk 
16801be097dcSMichal Krawczyk 		rx_info->mbuf = mbuf;
1681c0006061SMichal Krawczyk 		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
16821173fca2SJan Medala 	}
16831173fca2SJan Medala 
168479405ee1SRafal Kozik 	if (unlikely(i < count)) {
16850a001d69SMichal Krawczyk 		PMD_RX_LOG(WARNING,
1686617898d1SMichal Krawczyk 			"Refilled Rx queue[%d] with only %d/%d buffers\n",
1687617898d1SMichal Krawczyk 			rxq->id, i, count);
16883c8bc29fSDavid Harton 		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
16897830e905SSolganik Alexander 		++rxq->rx_stats.refill_partial;
169079405ee1SRafal Kozik 	}
1691241da076SRafal Kozik 
16927be78d02SJosh Soref 	/* When we submitted free resources to the device... */
16933d19e1abSRafal Kozik 	if (likely(i > 0)) {
169438faa87eSMichal Krawczyk 		/* ...let HW know that it can fill buffers with data. */
16951173fca2SJan Medala 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
16961173fca2SJan Medala 
16975e02e19eSJan Medala 		rxq->next_to_use = next_to_use;
16985e02e19eSJan Medala 	}
16995e02e19eSJan Medala 
17001173fca2SJan Medala 	return i;
17011173fca2SJan Medala }
17021173fca2SJan Medala 
1703b9b05d6fSMichal Krawczyk static int ena_device_init(struct ena_adapter *adapter,
1704aab58857SStanislaw Kardach 			   struct rte_pci_device *pdev,
1705b9b05d6fSMichal Krawczyk 			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
17061173fca2SJan Medala {
1707b9b05d6fSMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1708ca148440SMichal Krawczyk 	uint32_t aenq_groups;
17091173fca2SJan Medala 	int rc;
1710c4144557SJan Medala 	bool readless_supported;
17111173fca2SJan Medala 
17121173fca2SJan Medala 	/* Initialize mmio registers */
17131173fca2SJan Medala 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
17141173fca2SJan Medala 	if (rc) {
1715617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n");
17161173fca2SJan Medala 		return rc;
17171173fca2SJan Medala 	}
17181173fca2SJan Medala 
1719c4144557SJan Medala 	/* The PCIe configuration space revision ID indicates whether MMIO
1720c4144557SJan Medala 	 * register read is disabled.
1721c4144557SJan Medala 	 */
1722aab58857SStanislaw Kardach 	readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ);
1723c4144557SJan Medala 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1724c4144557SJan Medala 
17251173fca2SJan Medala 	/* reset device */
17263adcba9aSMichal Krawczyk 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
17271173fca2SJan Medala 	if (rc) {
1728617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Cannot reset device\n");
17291173fca2SJan Medala 		goto err_mmio_read_less;
17301173fca2SJan Medala 	}
17311173fca2SJan Medala 
17321173fca2SJan Medala 	/* check FW version */
17331173fca2SJan Medala 	rc = ena_com_validate_version(ena_dev);
17341173fca2SJan Medala 	if (rc) {
1735617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Device version is too low\n");
17361173fca2SJan Medala 		goto err_mmio_read_less;
17371173fca2SJan Medala 	}
17381173fca2SJan Medala 
17391173fca2SJan Medala 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
17401173fca2SJan Medala 
17411173fca2SJan Medala 	/* ENA device administration layer init */
1742b68309beSRafal Kozik 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
17431173fca2SJan Medala 	if (rc) {
17446f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1745617898d1SMichal Krawczyk 			"Cannot initialize ENA admin queue\n");
17461173fca2SJan Medala 		goto err_mmio_read_less;
17471173fca2SJan Medala 	}
17481173fca2SJan Medala 
17491173fca2SJan Medala 	/* To enable the MSI-X interrupts, the driver needs to know the number
17501173fca2SJan Medala 	 * of queues, so it uses polling mode to retrieve this
17511173fca2SJan Medala 	 * information.
17521173fca2SJan Medala 	 */
17531173fca2SJan Medala 	ena_com_set_admin_polling_mode(ena_dev, true);
17541173fca2SJan Medala 
1755201ff2e5SJakub Palider 	ena_config_host_info(ena_dev);
1756201ff2e5SJakub Palider 
17571173fca2SJan Medala 	/* Get Device Attributes and features */
17581173fca2SJan Medala 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
17591173fca2SJan Medala 	if (rc) {
17606f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1761617898d1SMichal Krawczyk 			"Cannot get attribute for ENA device, rc: %d\n", rc);
17621173fca2SJan Medala 		goto err_admin_init;
17631173fca2SJan Medala 	}
17641173fca2SJan Medala 
1765f01f060cSRafal Kozik 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1766d9b8b106SMichal Krawczyk 		      BIT(ENA_ADMIN_NOTIFICATION) |
1767983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1768983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1769983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_WARNING);
1770ca148440SMichal Krawczyk 
1771ca148440SMichal Krawczyk 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1772ca148440SMichal Krawczyk 
1773b9b05d6fSMichal Krawczyk 	adapter->all_aenq_groups = aenq_groups;
1774e859d2b8SRafal Kozik 
17751173fca2SJan Medala 	return 0;
17761173fca2SJan Medala 
17771173fca2SJan Medala err_admin_init:
17781173fca2SJan Medala 	ena_com_admin_destroy(ena_dev);
17791173fca2SJan Medala 
17801173fca2SJan Medala err_mmio_read_less:
17811173fca2SJan Medala 	ena_com_mmio_reg_read_request_destroy(ena_dev);
17821173fca2SJan Medala 
17831173fca2SJan Medala 	return rc;
17841173fca2SJan Medala }
17851173fca2SJan Medala 
1786ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg)
178715773e06SMichal Krawczyk {
1788aab58857SStanislaw Kardach 	struct rte_eth_dev *dev = cb_arg;
1789aab58857SStanislaw Kardach 	struct ena_adapter *adapter = dev->data->dev_private;
179015773e06SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
179115773e06SMichal Krawczyk 
179215773e06SMichal Krawczyk 	ena_com_admin_q_comp_intr_handler(ena_dev);
17933d19e1abSRafal Kozik 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1794aab58857SStanislaw Kardach 		ena_com_aenq_intr_handler(ena_dev, dev);
179515773e06SMichal Krawczyk }
179615773e06SMichal Krawczyk 
17975efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter)
17985efb9fc7SMichal Krawczyk {
1799b9b05d6fSMichal Krawczyk 	if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)))
1800e859d2b8SRafal Kozik 		return;
1801e859d2b8SRafal Kozik 
18025efb9fc7SMichal Krawczyk 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
18035efb9fc7SMichal Krawczyk 		return;
18045efb9fc7SMichal Krawczyk 
18055efb9fc7SMichal Krawczyk 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
18065efb9fc7SMichal Krawczyk 	    adapter->keep_alive_timeout)) {
18076f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
18082bae75eaSDawid Gorecki 		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
18097830e905SSolganik Alexander 		++adapter->dev_stats.wd_expired;
18105efb9fc7SMichal Krawczyk 	}
18115efb9fc7SMichal Krawczyk }
18125efb9fc7SMichal Krawczyk 
18135efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */
18145efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter)
18155efb9fc7SMichal Krawczyk {
18165efb9fc7SMichal Krawczyk 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
1817617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n");
18182bae75eaSDawid Gorecki 		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
18195efb9fc7SMichal Krawczyk 	}
18205efb9fc7SMichal Krawczyk }
18215efb9fc7SMichal Krawczyk 
1822f93e20e5SMichal Krawczyk static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
1823f93e20e5SMichal Krawczyk 					    struct ena_ring *tx_ring)
1824f93e20e5SMichal Krawczyk {
1825f93e20e5SMichal Krawczyk 	struct ena_tx_buffer *tx_buf;
1826f93e20e5SMichal Krawczyk 	uint64_t timestamp;
1827f93e20e5SMichal Krawczyk 	uint64_t completion_delay;
1828f93e20e5SMichal Krawczyk 	uint32_t missed_tx = 0;
1829f93e20e5SMichal Krawczyk 	unsigned int i;
1830f93e20e5SMichal Krawczyk 	int rc = 0;
1831f93e20e5SMichal Krawczyk 
1832f93e20e5SMichal Krawczyk 	for (i = 0; i < tx_ring->ring_size; ++i) {
1833f93e20e5SMichal Krawczyk 		tx_buf = &tx_ring->tx_buffer_info[i];
1834f93e20e5SMichal Krawczyk 		timestamp = tx_buf->timestamp;
1835f93e20e5SMichal Krawczyk 
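		/* A zero timestamp means no packet is in flight in this slot */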
1836f93e20e5SMichal Krawczyk 		if (timestamp == 0)
1837f93e20e5SMichal Krawczyk 			continue;
1838f93e20e5SMichal Krawczyk 
1839f93e20e5SMichal Krawczyk 		completion_delay = rte_get_timer_cycles() - timestamp;
1840f93e20e5SMichal Krawczyk 		if (completion_delay > adapter->missing_tx_completion_to) {
1841f93e20e5SMichal Krawczyk 			if (unlikely(!tx_buf->print_once)) {
1842f93e20e5SMichal Krawczyk 				PMD_TX_LOG(WARNING,
1843f93e20e5SMichal Krawczyk 					"Found a Tx that wasn't completed on time, qid %d, index %d. "
1844f93e20e5SMichal Krawczyk 					"Missing Tx outstanding for %" PRIu64 " msecs.\n",
1845f93e20e5SMichal Krawczyk 					tx_ring->id, i, completion_delay *
1846f93e20e5SMichal Krawczyk 					1000 / rte_get_timer_hz());
1847f93e20e5SMichal Krawczyk 				tx_buf->print_once = true;
1848f93e20e5SMichal Krawczyk 			}
1849f93e20e5SMichal Krawczyk 			++missed_tx;
1850f93e20e5SMichal Krawczyk 		}
1851f93e20e5SMichal Krawczyk 	}
1852f93e20e5SMichal Krawczyk 
1853f93e20e5SMichal Krawczyk 	if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
1854f93e20e5SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1855f93e20e5SMichal Krawczyk 			"The number of lost Tx completions is above the threshold (%d > %d). "
1856f93e20e5SMichal Krawczyk 			"Triggering the device reset.\n",
1857f93e20e5SMichal Krawczyk 			missed_tx,
1858f93e20e5SMichal Krawczyk 			tx_ring->missing_tx_completion_threshold);
1859f93e20e5SMichal Krawczyk 		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
1861f93e20e5SMichal Krawczyk 		rc = -EIO;
1862f93e20e5SMichal Krawczyk 	}
1863f93e20e5SMichal Krawczyk 
1864f93e20e5SMichal Krawczyk 	tx_ring->tx_stats.missed_tx += missed_tx;
1865f93e20e5SMichal Krawczyk 
1866f93e20e5SMichal Krawczyk 	return rc;
1867f93e20e5SMichal Krawczyk }
1868f93e20e5SMichal Krawczyk 
1869f93e20e5SMichal Krawczyk static void check_for_tx_completions(struct ena_adapter *adapter)
1870f93e20e5SMichal Krawczyk {
1871f93e20e5SMichal Krawczyk 	struct ena_ring *tx_ring;
1872f93e20e5SMichal Krawczyk 	uint64_t tx_cleanup_delay;
1873f93e20e5SMichal Krawczyk 	size_t qid;
1874f93e20e5SMichal Krawczyk 	int budget;
1875f93e20e5SMichal Krawczyk 	uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues;
1876f93e20e5SMichal Krawczyk 
1877f93e20e5SMichal Krawczyk 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
1878f93e20e5SMichal Krawczyk 		return;
1879f93e20e5SMichal Krawczyk 
1881f93e20e5SMichal Krawczyk 	budget = adapter->missing_tx_completion_budget;
1882f93e20e5SMichal Krawczyk 
1883f93e20e5SMichal Krawczyk 	qid = adapter->last_tx_comp_qid;
1884f93e20e5SMichal Krawczyk 	while (budget-- > 0) {
1885f93e20e5SMichal Krawczyk 		tx_ring = &adapter->tx_ring[qid];
1886f93e20e5SMichal Krawczyk 
1887f93e20e5SMichal Krawczyk 		/* Tx cleanup is called only by the burst function, so it runs
1888f93e20e5SMichal Krawczyk 		 * whenever the application decides to, and each run is also
1889f93e20e5SMichal Krawczyk 		 * limited by the threshold. To avoid falsely detecting a
1890f93e20e5SMichal Krawczyk 		 * missing HW Tx completion, measure the delay since the
1891f93e20e5SMichal Krawczyk 		 * cleanup function was last called.
1892f93e20e5SMichal Krawczyk 		 */
1893f93e20e5SMichal Krawczyk 		tx_cleanup_delay = rte_get_timer_cycles() -
1894f93e20e5SMichal Krawczyk 			tx_ring->last_cleanup_ticks;
1895f93e20e5SMichal Krawczyk 		if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay)
1896f93e20e5SMichal Krawczyk 			check_for_tx_completion_in_queue(adapter, tx_ring);
1897f93e20e5SMichal Krawczyk 		qid = (qid + 1) % nb_tx_queues;
1898f93e20e5SMichal Krawczyk 	}
1899f93e20e5SMichal Krawczyk 
1900f93e20e5SMichal Krawczyk 	adapter->last_tx_comp_qid = qid;
1901f93e20e5SMichal Krawczyk }
1902f93e20e5SMichal Krawczyk 
1903d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1904d9b8b106SMichal Krawczyk 				  void *arg)
1905d9b8b106SMichal Krawczyk {
1906aab58857SStanislaw Kardach 	struct rte_eth_dev *dev = arg;
1907aab58857SStanislaw Kardach 	struct ena_adapter *adapter = dev->data->dev_private;
1908d9b8b106SMichal Krawczyk 
1909e2174a54SMichal Krawczyk 	if (unlikely(adapter->trigger_reset))
1910e2174a54SMichal Krawczyk 		return;
1911e2174a54SMichal Krawczyk 
19125efb9fc7SMichal Krawczyk 	check_for_missing_keep_alive(adapter);
19135efb9fc7SMichal Krawczyk 	check_for_admin_com_state(adapter);
1914f93e20e5SMichal Krawczyk 	check_for_tx_completions(adapter);
1915d9b8b106SMichal Krawczyk 
19165efb9fc7SMichal Krawczyk 	if (unlikely(adapter->trigger_reset)) {
19176f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
19185723fbedSFerruh Yigit 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1919d9b8b106SMichal Krawczyk 			NULL);
1920d9b8b106SMichal Krawczyk 	}
1921d9b8b106SMichal Krawczyk }
1922d9b8b106SMichal Krawczyk 
19232fca2a98SMichal Krawczyk static inline void
19248a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config,
19258a7a73f2SMichal Krawczyk 			       struct ena_admin_feature_llq_desc *llq,
19268a7a73f2SMichal Krawczyk 			       bool use_large_llq_hdr)
19272fca2a98SMichal Krawczyk {
19282fca2a98SMichal Krawczyk 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
19292fca2a98SMichal Krawczyk 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
19302fca2a98SMichal Krawczyk 	llq_config->llq_num_decs_before_header =
19312fca2a98SMichal Krawczyk 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
19328a7a73f2SMichal Krawczyk 
19338a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr &&
19348a7a73f2SMichal Krawczyk 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
19358a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
19368a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
19378a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 256;
19388a7a73f2SMichal Krawczyk 	} else {
19398a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
19408a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
19412fca2a98SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 128;
19422fca2a98SMichal Krawczyk 	}
19438a7a73f2SMichal Krawczyk }
19442fca2a98SMichal Krawczyk 
19452fca2a98SMichal Krawczyk static int
19462fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter,
19472fca2a98SMichal Krawczyk 				struct ena_com_dev *ena_dev,
19482fca2a98SMichal Krawczyk 				struct ena_admin_feature_llq_desc *llq,
19492fca2a98SMichal Krawczyk 				struct ena_llq_configurations *llq_default_configurations)
19502fca2a98SMichal Krawczyk {
19512fca2a98SMichal Krawczyk 	int rc;
19522fca2a98SMichal Krawczyk 	u32 llq_feature_mask;
19532fca2a98SMichal Krawczyk 
19542fca2a98SMichal Krawczyk 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
19552fca2a98SMichal Krawczyk 	if (!(ena_dev->supported_features & llq_feature_mask)) {
19566f1c9df9SStephen Hemminger 		PMD_DRV_LOG(INFO,
19572fca2a98SMichal Krawczyk 			"LLQ is not supported. Falling back to host mode policy.\n");
19582fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
19592fca2a98SMichal Krawczyk 		return 0;
19602fca2a98SMichal Krawczyk 	}
19612fca2a98SMichal Krawczyk 
19629ae7a13fSDawid Gorecki 	if (adapter->dev_mem_base == NULL) {
19639ae7a13fSDawid Gorecki 		PMD_DRV_LOG(ERR,
19649ae7a13fSDawid Gorecki 			"LLQ is advertised as supported, but device doesn't expose mem bar\n");
19659ae7a13fSDawid Gorecki 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
19669ae7a13fSDawid Gorecki 		return 0;
19679ae7a13fSDawid Gorecki 	}
19689ae7a13fSDawid Gorecki 
19692fca2a98SMichal Krawczyk 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
19702fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
1971617898d1SMichal Krawczyk 		PMD_INIT_LOG(WARNING,
1972617898d1SMichal Krawczyk 			"Failed to config dev mode. Falling back to host mode policy.\n");
19732fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
19742fca2a98SMichal Krawczyk 		return 0;
19752fca2a98SMichal Krawczyk 	}
19762fca2a98SMichal Krawczyk 
19772fca2a98SMichal Krawczyk 	/* Nothing to config, exit */
19782fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
19792fca2a98SMichal Krawczyk 		return 0;
19802fca2a98SMichal Krawczyk 
19812fca2a98SMichal Krawczyk 	ena_dev->mem_bar = adapter->dev_mem_base;
19822fca2a98SMichal Krawczyk 
19832fca2a98SMichal Krawczyk 	return 0;
19842fca2a98SMichal Krawczyk }
19852fca2a98SMichal Krawczyk 
19865920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
198701bd6877SRafal Kozik 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
198801bd6877SRafal Kozik {
19895920d930SMichal Krawczyk 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
199001bd6877SRafal Kozik 
1991ea93d37eSRafal Kozik 	/* Regular queues capabilities */
1992ea93d37eSRafal Kozik 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1993ea93d37eSRafal Kozik 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1994ea93d37eSRafal Kozik 			&get_feat_ctx->max_queue_ext.max_queue_ext;
19952fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
19962fca2a98SMichal Krawczyk 				    max_queue_ext->max_rx_cq_num);
19972fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
19982fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
1999ea93d37eSRafal Kozik 	} else {
2000ea93d37eSRafal Kozik 		struct ena_admin_queue_feature_desc *max_queues =
2001ea93d37eSRafal Kozik 			&get_feat_ctx->max_queues;
20022fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queues->max_sq_num;
20032fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queues->max_cq_num;
20042fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
2005ea93d37eSRafal Kozik 	}
200601bd6877SRafal Kozik 
20072fca2a98SMichal Krawczyk 	/* In case of LLQ, use the LLQ number from the get feature cmd */
20082fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
20092fca2a98SMichal Krawczyk 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
20102fca2a98SMichal Krawczyk 
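	/* The usable IO queue count is bounded by the most constrained
	 * resource among Rx queues, Tx SQs and Tx CQs, and is capped at
	 * ENA_MAX_NUM_IO_QUEUES.
	 */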
20115920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
20125920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
20135920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
201401bd6877SRafal Kozik 
20155920d930SMichal Krawczyk 	if (unlikely(max_num_io_queues == 0)) {
2016617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n");
201701bd6877SRafal Kozik 		return 0;
201801bd6877SRafal Kozik 	}
201901bd6877SRafal Kozik 
20205920d930SMichal Krawczyk 	return max_num_io_queues;
202101bd6877SRafal Kozik }
202201bd6877SRafal Kozik 
2023e8c838fdSMichal Krawczyk static void
2024e8c838fdSMichal Krawczyk ena_set_offloads(struct ena_offloads *offloads,
2025e8c838fdSMichal Krawczyk 		 struct ena_admin_feature_offload_desc *offload_desc)
2026e8c838fdSMichal Krawczyk {
2027e8c838fdSMichal Krawczyk 	if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
2028e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_IPV4_TSO;
2029e8c838fdSMichal Krawczyk 
2030e8c838fdSMichal Krawczyk 	/* Tx IPv4 checksum offloads */
2031e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2032e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
2033e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
2034e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2035e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
2036e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
2037e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2038e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
2039e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;
2040e8c838fdSMichal Krawczyk 
2041e8c838fdSMichal Krawczyk 	/* Tx IPv6 checksum offloads */
2042e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2043e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
2044e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
2045e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2046e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
2047e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;
2048e8c838fdSMichal Krawczyk 
2049e8c838fdSMichal Krawczyk 	/* Rx IPv4 checksum offloads */
2050e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2051e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
2052e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
2053e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2054e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
2055e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;
2056e8c838fdSMichal Krawczyk 
2057e8c838fdSMichal Krawczyk 	/* Rx IPv6 checksum offloads */
2058e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2059e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
2060e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;
2061e8c838fdSMichal Krawczyk 
2062e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2063e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
2064e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_RX_RSS_HASH;
2065e8c838fdSMichal Krawczyk }
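
/*
 * Editorial sketch: ena_set_offloads() reduces the device's offload
 * descriptor to the PMD's internal ENA_* capability words, so later checks
 * are plain mask tests, e.g.:
 *
 *	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
 *		... advertise RTE_ETH_TX_OFFLOAD_TCP_TSO ...
 *
 * ena_get_rx_port_offloads() and ena_get_tx_port_offloads() below perform
 * exactly this kind of test when building the ethdev capability masks.
 */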
2066e8c838fdSMichal Krawczyk 
2067e3595539SStanislaw Kardach static int ena_init_once(void)
2068e3595539SStanislaw Kardach {
2069e3595539SStanislaw Kardach 	static bool init_done;
2070e3595539SStanislaw Kardach 
2071e3595539SStanislaw Kardach 	if (init_done)
2072e3595539SStanislaw Kardach 		return 0;
2073e3595539SStanislaw Kardach 
2074e3595539SStanislaw Kardach 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2075e3595539SStanislaw Kardach 		/* Init timer subsystem for the ENA timer service. */
2076e3595539SStanislaw Kardach 		rte_timer_subsystem_init();
2077e3595539SStanislaw Kardach 		/* Register handler for requests from secondary processes. */
2078e3595539SStanislaw Kardach 		rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle);
2079e3595539SStanislaw Kardach 	}
2080e3595539SStanislaw Kardach 
2081e3595539SStanislaw Kardach 	init_done = true;
2082e3595539SStanislaw Kardach 	return 0;
2083e3595539SStanislaw Kardach }
2084e3595539SStanislaw Kardach 
20851173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
20861173fca2SJan Medala {
2087ea93d37eSRafal Kozik 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
20881173fca2SJan Medala 	struct rte_pci_device *pci_dev;
2089eb0ef49dSMichal Krawczyk 	struct rte_intr_handle *intr_handle;
2090890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
20911173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
20921173fca2SJan Medala 	struct ena_com_dev_get_features_ctx get_feat_ctx;
20932fca2a98SMichal Krawczyk 	struct ena_llq_configurations llq_config;
20942fca2a98SMichal Krawczyk 	const char *queue_type_str;
20955920d930SMichal Krawczyk 	uint32_t max_num_io_queues;
2096ea93d37eSRafal Kozik 	int rc;
20971173fca2SJan Medala 	static int adapters_found;
209833dde075SMichal Krawczyk 	bool disable_meta_caching;
20991173fca2SJan Medala 
21001173fca2SJan Medala 	eth_dev->dev_ops = &ena_dev_ops;
21011173fca2SJan Medala 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
21021173fca2SJan Medala 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
2103b3fc5a1aSKonstantin Ananyev 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
21041173fca2SJan Medala 
2105e3595539SStanislaw Kardach 	rc = ena_init_once();
2106e3595539SStanislaw Kardach 	if (rc != 0)
2107e3595539SStanislaw Kardach 		return rc;
2108e3595539SStanislaw Kardach 
21091173fca2SJan Medala 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
21101173fca2SJan Medala 		return 0;
21111173fca2SJan Medala 
2112f30e69b4SFerruh Yigit 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2113f30e69b4SFerruh Yigit 
2114fd976890SMichal Krawczyk 	memset(adapter, 0, sizeof(struct ena_adapter));
2115fd976890SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
2116fd976890SMichal Krawczyk 
2117aab58857SStanislaw Kardach 	adapter->edev_data = eth_dev->data;
2118fd976890SMichal Krawczyk 
2119c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
21201173fca2SJan Medala 
2121617898d1SMichal Krawczyk 	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
21221173fca2SJan Medala 		     pci_dev->addr.domain,
21231173fca2SJan Medala 		     pci_dev->addr.bus,
21241173fca2SJan Medala 		     pci_dev->addr.devid,
21251173fca2SJan Medala 		     pci_dev->addr.function);
21261173fca2SJan Medala 
2127d61138d4SHarman Kalra 	intr_handle = pci_dev->intr_handle;
2128eb0ef49dSMichal Krawczyk 
21291173fca2SJan Medala 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
21301173fca2SJan Medala 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
21311173fca2SJan Medala 
21321d339597SRafal Kozik 	if (!adapter->regs) {
2133617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
21341173fca2SJan Medala 			     ENA_REGS_BAR);
21351d339597SRafal Kozik 		return -ENXIO;
21361d339597SRafal Kozik 	}
21371173fca2SJan Medala 
21381173fca2SJan Medala 	ena_dev->reg_bar = adapter->regs;
2139850e1bb1SMichal Krawczyk 	/* Pass the device data as a pointer that can be handed to the IO
2140850e1bb1SMichal Krawczyk 	 * functions by ena_com (for example, for memory allocation).
2141850e1bb1SMichal Krawczyk 	 */
2142850e1bb1SMichal Krawczyk 	ena_dev->dmadev = eth_dev->data;
21431173fca2SJan Medala 
21441173fca2SJan Medala 	adapter->id_number = adapters_found;
21451173fca2SJan Medala 
21461173fca2SJan Medala 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
21471173fca2SJan Medala 		 adapter->id_number);
21481173fca2SJan Medala 
2149cc0c5d25SMichal Krawczyk 	adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
2150cc0c5d25SMichal Krawczyk 
21518a7a73f2SMichal Krawczyk 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
21528a7a73f2SMichal Krawczyk 	if (rc != 0) {
21538a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
21548a7a73f2SMichal Krawczyk 		goto err;
21558a7a73f2SMichal Krawczyk 	}
21568a7a73f2SMichal Krawczyk 
21571173fca2SJan Medala 	/* device specific initialization routine */
2158b9b05d6fSMichal Krawczyk 	rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
21591173fca2SJan Medala 	if (rc) {
2160617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
2161241da076SRafal Kozik 		goto err;
21621173fca2SJan Medala 	}
2163b9b05d6fSMichal Krawczyk 
2164b9b05d6fSMichal Krawczyk 	/* Check if device supports LSC */
2165b9b05d6fSMichal Krawczyk 	if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
2166b9b05d6fSMichal Krawczyk 		adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
21671173fca2SJan Medala 
21688a7a73f2SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
21698a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
21702fca2a98SMichal Krawczyk 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
21712fca2a98SMichal Krawczyk 					     &get_feat_ctx.llq, &llq_config);
21722fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
2173617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
21742fca2a98SMichal Krawczyk 		goto err_device_destroy;
21752fca2a98SMichal Krawczyk 	}
21762fca2a98SMichal Krawczyk 
21772fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
21782fca2a98SMichal Krawczyk 		queue_type_str = "Regular";
21792fca2a98SMichal Krawczyk 	else
21802fca2a98SMichal Krawczyk 		queue_type_str = "Low latency";
21816f1c9df9SStephen Hemminger 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
2182ea93d37eSRafal Kozik 
2183ea93d37eSRafal Kozik 	calc_queue_ctx.ena_dev = ena_dev;
2184ea93d37eSRafal Kozik 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
21851173fca2SJan Medala 
21865920d930SMichal Krawczyk 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
21878a7a73f2SMichal Krawczyk 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
21888a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
21895920d930SMichal Krawczyk 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
2190241da076SRafal Kozik 		rc = -EFAULT;
2191241da076SRafal Kozik 		goto err_device_destroy;
2192241da076SRafal Kozik 	}
21931173fca2SJan Medala 
21945920d930SMichal Krawczyk 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
21955920d930SMichal Krawczyk 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
2196ea93d37eSRafal Kozik 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
2197ea93d37eSRafal Kozik 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
21985920d930SMichal Krawczyk 	adapter->max_num_io_queues = max_num_io_queues;
21992061fe41SRafal Kozik 
220033dde075SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
220133dde075SMichal Krawczyk 		disable_meta_caching =
220233dde075SMichal Krawczyk 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
220333dde075SMichal Krawczyk 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
220433dde075SMichal Krawczyk 	} else {
220533dde075SMichal Krawczyk 		disable_meta_caching = false;
220633dde075SMichal Krawczyk 	}
220733dde075SMichal Krawczyk 
22081173fca2SJan Medala 	/* prepare ring structures */
220933dde075SMichal Krawczyk 	ena_init_rings(adapter, disable_meta_caching);
22101173fca2SJan Medala 
2211372c1af5SJan Medala 	ena_config_debug_area(adapter);
2212372c1af5SJan Medala 
22131173fca2SJan Medala 	/* Set max MTU for this device */
22141173fca2SJan Medala 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
22151173fca2SJan Medala 
2216e8c838fdSMichal Krawczyk 	ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload);
221783277a7cSJakub Palider 
22181173fca2SJan Medala 	/* Copy MAC address and point DPDK to it */
22196d13ea8eSOlivier Matz 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
2220538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)
2221538da7a1SOlivier Matz 			get_feat_ctx.dev_attr.mac_addr,
22226d13ea8eSOlivier Matz 			(struct rte_ether_addr *)adapter->mac_addr);
22231173fca2SJan Medala 
222434d5e97eSMichal Krawczyk 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
222534d5e97eSMichal Krawczyk 	if (unlikely(rc != 0)) {
222634d5e97eSMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
222734d5e97eSMichal Krawczyk 		goto err_delete_debug_area;
222834d5e97eSMichal Krawczyk 	}
222934d5e97eSMichal Krawczyk 
22301173fca2SJan Medala 	adapter->drv_stats = rte_zmalloc("adapter stats",
22311173fca2SJan Medala 					 sizeof(*adapter->drv_stats),
22321173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
22331173fca2SJan Medala 	if (!adapter->drv_stats) {
2234617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
2235617898d1SMichal Krawczyk 			"Failed to allocate memory for adapter statistics\n");
2236241da076SRafal Kozik 		rc = -ENOMEM;
223734d5e97eSMichal Krawczyk 		goto err_rss_destroy;
22381173fca2SJan Medala 	}
22391173fca2SJan Medala 
22401343c415SMichal Krawczyk 	rte_spinlock_init(&adapter->admin_lock);
22411343c415SMichal Krawczyk 
2242eb0ef49dSMichal Krawczyk 	rte_intr_callback_register(intr_handle,
2243eb0ef49dSMichal Krawczyk 				   ena_interrupt_handler_rte,
2244aab58857SStanislaw Kardach 				   eth_dev);
2245eb0ef49dSMichal Krawczyk 	rte_intr_enable(intr_handle);
2246eb0ef49dSMichal Krawczyk 	ena_com_set_admin_polling_mode(ena_dev, false);
2247ca148440SMichal Krawczyk 	ena_com_admin_aenq_enable(ena_dev);
2248eb0ef49dSMichal Krawczyk 
2249d9b8b106SMichal Krawczyk 	rte_timer_init(&adapter->timer_wd);
2250d9b8b106SMichal Krawczyk 
22511173fca2SJan Medala 	adapters_found++;
22521173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_INIT;
22531173fca2SJan Medala 
22541173fca2SJan Medala 	return 0;
2255241da076SRafal Kozik 
225634d5e97eSMichal Krawczyk err_rss_destroy:
225734d5e97eSMichal Krawczyk 	ena_com_rss_destroy(ena_dev);
2258241da076SRafal Kozik err_delete_debug_area:
2259241da076SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
2260241da076SRafal Kozik 
2261241da076SRafal Kozik err_device_destroy:
2262241da076SRafal Kozik 	ena_com_delete_host_info(ena_dev);
2263241da076SRafal Kozik 	ena_com_admin_destroy(ena_dev);
2264241da076SRafal Kozik 
2265241da076SRafal Kozik err:
2266241da076SRafal Kozik 	return rc;
22671173fca2SJan Medala }
22681173fca2SJan Medala 
2269e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev)
2270eb0ef49dSMichal Krawczyk {
2271890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
2272e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
2273eb0ef49dSMichal Krawczyk 
2274e457bc70SRafal Kozik 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
2275e457bc70SRafal Kozik 		return;
2276e457bc70SRafal Kozik 
2277e457bc70SRafal Kozik 	ena_com_set_admin_running_state(ena_dev, false);
2278eb0ef49dSMichal Krawczyk 
2279eb0ef49dSMichal Krawczyk 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
2280eb0ef49dSMichal Krawczyk 		ena_close(eth_dev);
2281eb0ef49dSMichal Krawczyk 
228234d5e97eSMichal Krawczyk 	ena_com_rss_destroy(ena_dev);
228334d5e97eSMichal Krawczyk 
2284e457bc70SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
2285e457bc70SRafal Kozik 	ena_com_delete_host_info(ena_dev);
2286e457bc70SRafal Kozik 
2287e457bc70SRafal Kozik 	ena_com_abort_admin_commands(ena_dev);
2288e457bc70SRafal Kozik 	ena_com_wait_for_abort_completion(ena_dev);
2289e457bc70SRafal Kozik 	ena_com_admin_destroy(ena_dev);
2290e457bc70SRafal Kozik 	ena_com_mmio_reg_read_request_destroy(ena_dev);
2291e457bc70SRafal Kozik 
2292e457bc70SRafal Kozik 	adapter->state = ENA_ADAPTER_STATE_FREE;
2293e457bc70SRafal Kozik }
2294e457bc70SRafal Kozik 
2295e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
2296e457bc70SRafal Kozik {
2297e457bc70SRafal Kozik 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2298e457bc70SRafal Kozik 		return 0;
2299e457bc70SRafal Kozik 
2300e457bc70SRafal Kozik 	ena_destroy_device(eth_dev);
2301e457bc70SRafal Kozik 
2302eb0ef49dSMichal Krawczyk 	return 0;
2303eb0ef49dSMichal Krawczyk }
2304eb0ef49dSMichal Krawczyk 
23051173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
23061173fca2SJan Medala {
2307890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
2308b9b05d6fSMichal Krawczyk 	int rc;
23097369f88fSRafal Kozik 
23101173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
23111173fca2SJan Medala 
2312295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
2313295968d1SFerruh Yigit 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2314295968d1SFerruh Yigit 	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2315b418f0d2SMichal Krawczyk 
2316e2a6d08bSMichal Krawczyk 	/* Scattered Rx cannot be turned off in the HW, so this capability must
2317e2a6d08bSMichal Krawczyk 	 * be forced.
2318e2a6d08bSMichal Krawczyk 	 */
2319e2a6d08bSMichal Krawczyk 	dev->data->scattered_rx = 1;
2320e2a6d08bSMichal Krawczyk 
2321f93e20e5SMichal Krawczyk 	adapter->last_tx_comp_qid = 0;
2322f93e20e5SMichal Krawczyk 
2323f93e20e5SMichal Krawczyk 	adapter->missing_tx_completion_budget =
2324f93e20e5SMichal Krawczyk 		RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
2325f93e20e5SMichal Krawczyk 
2326f93e20e5SMichal Krawczyk 	/* To avoid detecting a spurious Tx completion timeout when the
2327f93e20e5SMichal Krawczyk 	 * application simply has not called the Tx cleanup function yet, set
2328f93e20e5SMichal Krawczyk 	 * the per-queue stall delay to half of the missing completion timeout
2329f93e20e5SMichal Krawczyk 	 * as a safety margin. If many Tx completions are genuinely missing in
2330f93e20e5SMichal Krawczyk 	 * the queue, they will still be detected sooner or later.
2331f93e20e5SMichal Krawczyk 	 */
2332f93e20e5SMichal Krawczyk 	adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
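
	/*
	 * Editorial worked example: assuming, for illustration, a missing
	 * completion timeout of 5 seconds (the ENA_TX_TIMEOUT default value
	 * is an assumption here), the stall delay becomes 2.5 seconds. A
	 * queue is then reported as stalled only if the application has not
	 * run the Tx cleanup path for longer than that.
	 */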
2333f93e20e5SMichal Krawczyk 
2334b9b05d6fSMichal Krawczyk 	rc = ena_configure_aenq(adapter);
2335b9b05d6fSMichal Krawczyk 
2336b9b05d6fSMichal Krawczyk 	return rc;
23371173fca2SJan Medala }
23381173fca2SJan Medala 
233933dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
234033dde075SMichal Krawczyk 			   bool disable_meta_caching)
23411173fca2SJan Medala {
23425920d930SMichal Krawczyk 	size_t i;
23431173fca2SJan Medala 
23445920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
23451173fca2SJan Medala 		struct ena_ring *ring = &adapter->tx_ring[i];
23461173fca2SJan Medala 
23471173fca2SJan Medala 		ring->configured = 0;
23481173fca2SJan Medala 		ring->type = ENA_RING_TYPE_TX;
23491173fca2SJan Medala 		ring->adapter = adapter;
23501173fca2SJan Medala 		ring->id = i;
23511173fca2SJan Medala 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
23521173fca2SJan Medala 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
23532061fe41SRafal Kozik 		ring->sgl_size = adapter->max_tx_sgl_size;
235433dde075SMichal Krawczyk 		ring->disable_meta_caching = disable_meta_caching;
23551173fca2SJan Medala 	}
23561173fca2SJan Medala 
23575920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
23581173fca2SJan Medala 		struct ena_ring *ring = &adapter->rx_ring[i];
23591173fca2SJan Medala 
23601173fca2SJan Medala 		ring->configured = 0;
23611173fca2SJan Medala 		ring->type = ENA_RING_TYPE_RX;
23621173fca2SJan Medala 		ring->adapter = adapter;
23631173fca2SJan Medala 		ring->id = i;
2364ea93d37eSRafal Kozik 		ring->sgl_size = adapter->max_rx_sgl_size;
23651173fca2SJan Medala 	}
23661173fca2SJan Medala }
23671173fca2SJan Medala 
23683a822d79SMichal Krawczyk static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
23693a822d79SMichal Krawczyk {
23703a822d79SMichal Krawczyk 	uint64_t port_offloads = 0;
23713a822d79SMichal Krawczyk 
23723a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
2373295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
23743a822d79SMichal Krawczyk 
23753a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads &
23763a822d79SMichal Krawczyk 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
23773a822d79SMichal Krawczyk 		port_offloads |=
2378295968d1SFerruh Yigit 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
23793a822d79SMichal Krawczyk 
23803a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
2381295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
23823a822d79SMichal Krawczyk 
2383295968d1SFerruh Yigit 	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
2384e2a6d08bSMichal Krawczyk 
23853a822d79SMichal Krawczyk 	return port_offloads;
23863a822d79SMichal Krawczyk }
23873a822d79SMichal Krawczyk 
23883a822d79SMichal Krawczyk static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
23893a822d79SMichal Krawczyk {
23903a822d79SMichal Krawczyk 	uint64_t port_offloads = 0;
23913a822d79SMichal Krawczyk 
23923a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
2393295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
23943a822d79SMichal Krawczyk 
23953a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
2396295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
23973a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads &
23983a822d79SMichal Krawczyk 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
23993a822d79SMichal Krawczyk 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
24003a822d79SMichal Krawczyk 		port_offloads |=
2401295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
24023a822d79SMichal Krawczyk 
2403295968d1SFerruh Yigit 	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
24043a822d79SMichal Krawczyk 
24053a822d79SMichal Krawczyk 	return port_offloads;
24063a822d79SMichal Krawczyk }
24073a822d79SMichal Krawczyk 
24083a822d79SMichal Krawczyk static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
24093a822d79SMichal Krawczyk {
24103a822d79SMichal Krawczyk 	RTE_SET_USED(adapter);
24113a822d79SMichal Krawczyk 
24123a822d79SMichal Krawczyk 	return 0;
24133a822d79SMichal Krawczyk }
24143a822d79SMichal Krawczyk 
24153a822d79SMichal Krawczyk static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
24163a822d79SMichal Krawczyk {
24173a822d79SMichal Krawczyk 	RTE_SET_USED(adapter);
24183a822d79SMichal Krawczyk 
24193a822d79SMichal Krawczyk 	return 0;
24203a822d79SMichal Krawczyk }
24213a822d79SMichal Krawczyk 
2422bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
24231173fca2SJan Medala 			  struct rte_eth_dev_info *dev_info)
24241173fca2SJan Medala {
24251173fca2SJan Medala 	struct ena_adapter *adapter;
24261173fca2SJan Medala 	struct ena_com_dev *ena_dev;
24271173fca2SJan Medala 
2428498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2429498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2430890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
24311173fca2SJan Medala 
24321173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
2433498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
24341173fca2SJan Medala 
2435e274f573SMarc Sune 	dev_info->speed_capa =
2436295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_1G   |
2437295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_2_5G |
2438295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_5G   |
2439295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_10G  |
2440295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_25G  |
2441295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_40G  |
2442295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_50G  |
2443295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_100G;
2444e274f573SMarc Sune 
24451173fca2SJan Medala 	/* Inform framework about available features */
24463a822d79SMichal Krawczyk 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
24473a822d79SMichal Krawczyk 	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
24483a822d79SMichal Krawczyk 	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
24493a822d79SMichal Krawczyk 	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
24501173fca2SJan Medala 
245134d5e97eSMichal Krawczyk 	dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
245234d5e97eSMichal Krawczyk 	dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
2453b01ead20SRafal Kozik 
24541173fca2SJan Medala 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
24551bb4a528SFerruh Yigit 	dev_info->max_rx_pktlen  = adapter->max_mtu + RTE_ETHER_HDR_LEN +
24561bb4a528SFerruh Yigit 		RTE_ETHER_CRC_LEN;
24571bb4a528SFerruh Yigit 	dev_info->min_mtu = ENA_MIN_MTU;
24581bb4a528SFerruh Yigit 	dev_info->max_mtu = adapter->max_mtu;
24591173fca2SJan Medala 	dev_info->max_mac_addrs = 1;
24601173fca2SJan Medala 
24615920d930SMichal Krawczyk 	dev_info->max_rx_queues = adapter->max_num_io_queues;
24625920d930SMichal Krawczyk 	dev_info->max_tx_queues = adapter->max_num_io_queues;
24631173fca2SJan Medala 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
246456b8b9b7SRafal Kozik 
24655920d930SMichal Krawczyk 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
246692680dc2SRafal Kozik 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2467ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2468ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
2469ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2470ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
247192680dc2SRafal Kozik 
24725920d930SMichal Krawczyk 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
247392680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
247492680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2475ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
247692680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2477ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
2478bdad90d1SIvan Ilchenko 
247930a6c7efSStanislaw Kardach 	dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE;
248030a6c7efSStanislaw Kardach 	dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE;
248130a6c7efSStanislaw Kardach 
2482bdad90d1SIvan Ilchenko 	return 0;
24831173fca2SJan Medala }
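
/*
 * Editorial usage sketch (application side): the descriptor limits filled
 * above are meant to be consumed through the standard ethdev API, e.g.:
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxd = 4096;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	nb_rxd = RTE_MIN(nb_rxd, info.rx_desc_lim.nb_max);
 *	nb_rxd = RTE_MAX(nb_rxd, info.rx_desc_lim.nb_min);
 *
 * port_id is assumed to be a valid, already probed port.
 */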
24841173fca2SJan Medala 
24851be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
24861be097dcSMichal Krawczyk {
24871be097dcSMichal Krawczyk 	mbuf->data_len = len;
24881be097dcSMichal Krawczyk 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
24891be097dcSMichal Krawczyk 	mbuf->refcnt = 1;
24901be097dcSMichal Krawczyk 	mbuf->next = NULL;
24911be097dcSMichal Krawczyk }
24921be097dcSMichal Krawczyk 
24931be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
24941be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
24951be097dcSMichal Krawczyk 				    uint32_t descs,
24961be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
24971be097dcSMichal Krawczyk 				    uint8_t offset)
24981be097dcSMichal Krawczyk {
24991be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
25001be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf_head;
25011be097dcSMichal Krawczyk 	struct ena_rx_buffer *rx_info;
250283fd97b2SMichal Krawczyk 	int rc;
25031be097dcSMichal Krawczyk 	uint16_t ntc, len, req_id, buf = 0;
25041be097dcSMichal Krawczyk 
25051be097dcSMichal Krawczyk 	if (unlikely(descs == 0))
25061be097dcSMichal Krawczyk 		return NULL;
25071be097dcSMichal Krawczyk 
25081be097dcSMichal Krawczyk 	ntc = *next_to_clean;
25091be097dcSMichal Krawczyk 
25101be097dcSMichal Krawczyk 	len = ena_bufs[buf].len;
25111be097dcSMichal Krawczyk 	req_id = ena_bufs[buf].req_id;
25121be097dcSMichal Krawczyk 
25131be097dcSMichal Krawczyk 	rx_info = &rx_ring->rx_buffer_info[req_id];
25141be097dcSMichal Krawczyk 
25151be097dcSMichal Krawczyk 	mbuf = rx_info->mbuf;
25161be097dcSMichal Krawczyk 	RTE_ASSERT(mbuf != NULL);
25171be097dcSMichal Krawczyk 
25181be097dcSMichal Krawczyk 	ena_init_rx_mbuf(mbuf, len);
25191be097dcSMichal Krawczyk 
25201be097dcSMichal Krawczyk 	/* Fill the mbuf head with the data specific to the 1st segment. */
25211be097dcSMichal Krawczyk 	mbuf_head = mbuf;
25221be097dcSMichal Krawczyk 	mbuf_head->nb_segs = descs;
25231be097dcSMichal Krawczyk 	mbuf_head->port = rx_ring->port_id;
25241be097dcSMichal Krawczyk 	mbuf_head->pkt_len = len;
25251be097dcSMichal Krawczyk 	mbuf_head->data_off += offset;
25261be097dcSMichal Krawczyk 
25271be097dcSMichal Krawczyk 	rx_info->mbuf = NULL;
2528c0006061SMichal Krawczyk 	rx_ring->empty_rx_reqs[ntc] = req_id;
2529c0006061SMichal Krawczyk 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
25301be097dcSMichal Krawczyk 
25311be097dcSMichal Krawczyk 	while (--descs) {
25321be097dcSMichal Krawczyk 		++buf;
25331be097dcSMichal Krawczyk 		len = ena_bufs[buf].len;
25341be097dcSMichal Krawczyk 		req_id = ena_bufs[buf].req_id;
25351be097dcSMichal Krawczyk 
25361be097dcSMichal Krawczyk 		rx_info = &rx_ring->rx_buffer_info[req_id];
25371be097dcSMichal Krawczyk 		RTE_ASSERT(rx_info->mbuf != NULL);
25381be097dcSMichal Krawczyk 
253983fd97b2SMichal Krawczyk 		if (unlikely(len == 0)) {
254083fd97b2SMichal Krawczyk 			/*
254183fd97b2SMichal Krawczyk 			 * Some devices can pass a descriptor with length 0.
254283fd97b2SMichal Krawczyk 			 * To avoid confusion, the PMD simply puts the
254383fd97b2SMichal Krawczyk 			 * descriptor back, as it was never used; that way an
254483fd97b2SMichal Krawczyk 			 * mbuf allocation is avoided as well.
254583fd97b2SMichal Krawczyk 			 */
254683fd97b2SMichal Krawczyk 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
254783fd97b2SMichal Krawczyk 				rx_info->mbuf, req_id);
254883fd97b2SMichal Krawczyk 			if (unlikely(rc != 0)) {
254983fd97b2SMichal Krawczyk 				/* Free the mbuf in case of an error. */
255083fd97b2SMichal Krawczyk 				rte_mbuf_raw_free(rx_info->mbuf);
255183fd97b2SMichal Krawczyk 			} else {
255283fd97b2SMichal Krawczyk 				/*
255383fd97b2SMichal Krawczyk 				 * If there was no error, just exit the loop,
255483fd97b2SMichal Krawczyk 				 * as a 0 length descriptor is always the last one.
255583fd97b2SMichal Krawczyk 				 */
255683fd97b2SMichal Krawczyk 				break;
255783fd97b2SMichal Krawczyk 			}
255883fd97b2SMichal Krawczyk 		} else {
25591be097dcSMichal Krawczyk 			/* Create an mbuf chain. */
25601be097dcSMichal Krawczyk 			mbuf->next = rx_info->mbuf;
25611be097dcSMichal Krawczyk 			mbuf = mbuf->next;
25621be097dcSMichal Krawczyk 
25631be097dcSMichal Krawczyk 			ena_init_rx_mbuf(mbuf, len);
25641be097dcSMichal Krawczyk 			mbuf_head->pkt_len += len;
256583fd97b2SMichal Krawczyk 		}
25661be097dcSMichal Krawczyk 
256783fd97b2SMichal Krawczyk 		/*
256883fd97b2SMichal Krawczyk 		 * Mark the descriptor as depleted and perform necessary
256983fd97b2SMichal Krawczyk 		 * cleanup.
257083fd97b2SMichal Krawczyk 		 * This code will execute in two cases:
257183fd97b2SMichal Krawczyk 		 *  1. Descriptor len was greater than 0 - normal situation.
257283fd97b2SMichal Krawczyk 		 *  2. Descriptor len was 0 and we failed to add the descriptor
257383fd97b2SMichal Krawczyk 		 *     to the device. In that situation, we should try to add
257483fd97b2SMichal Krawczyk 		 *     the mbuf again in the populate routine and mark the
257583fd97b2SMichal Krawczyk 		 *     descriptor as used up by the device.
257683fd97b2SMichal Krawczyk 		 */
25771be097dcSMichal Krawczyk 		rx_info->mbuf = NULL;
2578c0006061SMichal Krawczyk 		rx_ring->empty_rx_reqs[ntc] = req_id;
2579c0006061SMichal Krawczyk 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
25801be097dcSMichal Krawczyk 	}
25811be097dcSMichal Krawczyk 
25821be097dcSMichal Krawczyk 	*next_to_clean = ntc;
25831be097dcSMichal Krawczyk 
25841be097dcSMichal Krawczyk 	return mbuf_head;
25851be097dcSMichal Krawczyk }
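
/*
 * Editorial worked example: for a completion described by three buffers of
 * 2048, 2048 and 512 bytes (descs == 3), ena_rx_mbuf() returns a chain of
 * three mbufs where:
 *
 *	mbuf_head->nb_segs == 3
 *	mbuf_head->pkt_len == 2048 + 2048 + 512 == 4608
 *	each segment's data_len holds its own buffer length
 *
 * and all three req_ids are recycled into empty_rx_reqs[] so the populate
 * routine can repost fresh buffers for them.
 */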
25861be097dcSMichal Krawczyk 
25871173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
25881173fca2SJan Medala 				  uint16_t nb_pkts)
25891173fca2SJan Medala {
25901173fca2SJan Medala 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
259177550607SMichal Krawczyk 	unsigned int free_queue_entries;
25921173fca2SJan Medala 	uint16_t next_to_clean = rx_ring->next_to_clean;
259374456796SMichal Krawczyk 	uint16_t descs_in_use;
25941be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
25951be097dcSMichal Krawczyk 	uint16_t completed;
25961173fca2SJan Medala 	struct ena_com_rx_ctx ena_rx_ctx;
25971be097dcSMichal Krawczyk 	int i, rc = 0;
259834d5e97eSMichal Krawczyk 	bool fill_hash;
25991173fca2SJan Medala 
26000a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
26011173fca2SJan Medala 	/* Check adapter state */
26021173fca2SJan Medala 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
26030a001d69SMichal Krawczyk 		PMD_RX_LOG(ALERT,
26041173fca2SJan Medala 			"Trying to receive pkts while device is NOT running\n");
26051173fca2SJan Medala 		return 0;
26061173fca2SJan Medala 	}
26070a001d69SMichal Krawczyk #endif
26081173fca2SJan Medala 
2609295968d1SFerruh Yigit 	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
261034d5e97eSMichal Krawczyk 
2611c0006061SMichal Krawczyk 	descs_in_use = rx_ring->ring_size -
261274456796SMichal Krawczyk 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
261374456796SMichal Krawczyk 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
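	/*
	 * Editorial note: with a hypothetical ring_size of 1024 and 1000 free
	 * submission queue entries, descs_in_use = 1024 - 1000 - 1 = 23, so
	 * at most 23 packets can be completed in this call regardless of the
	 * requested burst size.
	 */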
26141173fca2SJan Medala 
26151173fca2SJan Medala 	for (completed = 0; completed < nb_pkts; completed++) {
2616ea93d37eSRafal Kozik 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
26171173fca2SJan Medala 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
26181173fca2SJan Medala 		ena_rx_ctx.descs = 0;
26197b3a3c4bSMaciej Bielski 		ena_rx_ctx.pkt_offset = 0;
26201173fca2SJan Medala 		/* receive packet context */
26211173fca2SJan Medala 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
26221173fca2SJan Medala 				    rx_ring->ena_com_io_sq,
26231173fca2SJan Medala 				    &ena_rx_ctx);
26241173fca2SJan Medala 		if (unlikely(rc)) {
26250a001d69SMichal Krawczyk 			PMD_RX_LOG(ERR,
2626617898d1SMichal Krawczyk 				"Failed to get the packet from the device, rc: %d\n",
2627617898d1SMichal Krawczyk 				rc);
262805cffdcfSMichal Krawczyk 			if (rc == ENA_COM_NO_SPACE) {
262905cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_desc_num;
26302bae75eaSDawid Gorecki 				ena_trigger_reset(rx_ring->adapter,
26312bae75eaSDawid Gorecki 					ENA_REGS_RESET_TOO_MANY_RX_DESCS);
263205cffdcfSMichal Krawczyk 			} else {
263305cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_req_id;
26342bae75eaSDawid Gorecki 				ena_trigger_reset(rx_ring->adapter,
26352bae75eaSDawid Gorecki 					ENA_REGS_RESET_INV_RX_REQ_ID);
263605cffdcfSMichal Krawczyk 			}
26371173fca2SJan Medala 			return 0;
26381173fca2SJan Medala 		}
26391173fca2SJan Medala 
26401be097dcSMichal Krawczyk 		mbuf = ena_rx_mbuf(rx_ring,
26411be097dcSMichal Krawczyk 			ena_rx_ctx.ena_bufs,
26421be097dcSMichal Krawczyk 			ena_rx_ctx.descs,
26431be097dcSMichal Krawczyk 			&next_to_clean,
26441be097dcSMichal Krawczyk 			ena_rx_ctx.pkt_offset);
26451be097dcSMichal Krawczyk 		if (unlikely(mbuf == NULL)) {
26461be097dcSMichal Krawczyk 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2647c0006061SMichal Krawczyk 				rx_ring->empty_rx_reqs[next_to_clean] =
26481be097dcSMichal Krawczyk 					rx_ring->ena_bufs[i].req_id;
2649c0006061SMichal Krawczyk 				next_to_clean = ENA_IDX_NEXT_MASKED(
2650c0006061SMichal Krawczyk 					next_to_clean, rx_ring->size_mask);
26511173fca2SJan Medala 			}
2652f00930d9SRafal Kozik 			break;
26531be097dcSMichal Krawczyk 		}
26541173fca2SJan Medala 
26551173fca2SJan Medala 		/* fill mbuf attributes if any */
265684daba99SMichal Krawczyk 		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
26577830e905SSolganik Alexander 
26581be097dcSMichal Krawczyk 		if (unlikely(mbuf->ol_flags &
265984daba99SMichal Krawczyk 				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))
2660ef74b5f7SMichal Krawczyk 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
26617830e905SSolganik Alexander 
26621be097dcSMichal Krawczyk 		rx_pkts[completed] = mbuf;
26631be097dcSMichal Krawczyk 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
26641173fca2SJan Medala 	}
26651173fca2SJan Medala 
26661be097dcSMichal Krawczyk 	rx_ring->rx_stats.cnt += completed;
2667ec78af6bSMichal Krawczyk 	rx_ring->next_to_clean = next_to_clean;
2668ec78af6bSMichal Krawczyk 
266977550607SMichal Krawczyk 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
267077550607SMichal Krawczyk 
26711173fca2SJan Medala 	/* Burst refill to save doorbells, memory barriers, const interval */
2672005064e5SMichal Krawczyk 	if (free_queue_entries >= rx_ring->rx_free_thresh) {
2673a45462c5SRafal Kozik 		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
267477550607SMichal Krawczyk 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2675a45462c5SRafal Kozik 	}
26761173fca2SJan Medala 
26771be097dcSMichal Krawczyk 	return completed;
26781173fca2SJan Medala }
26791173fca2SJan Medala 
2680b3fc5a1aSKonstantin Ananyev static uint16_t
268183277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2682b3fc5a1aSKonstantin Ananyev 		uint16_t nb_pkts)
2683b3fc5a1aSKonstantin Ananyev {
2684b3fc5a1aSKonstantin Ananyev 	int32_t ret;
2685b3fc5a1aSKonstantin Ananyev 	uint32_t i;
2686b3fc5a1aSKonstantin Ananyev 	struct rte_mbuf *m;
268783277a7cSJakub Palider 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2688e8c838fdSMichal Krawczyk 	struct ena_adapter *adapter = tx_ring->adapter;
2689a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ip_hdr;
2690b3fc5a1aSKonstantin Ananyev 	uint64_t ol_flags;
2691e8c838fdSMichal Krawczyk 	uint64_t l4_csum_flag;
2692e8c838fdSMichal Krawczyk 	uint64_t dev_offload_capa;
269383277a7cSJakub Palider 	uint16_t frag_field;
2694e8c838fdSMichal Krawczyk 	bool need_pseudo_csum;
269583277a7cSJakub Palider 
2696e8c838fdSMichal Krawczyk 	dev_offload_capa = adapter->offloads.tx_offloads;
2697b3fc5a1aSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
2698b3fc5a1aSKonstantin Ananyev 		m = tx_pkts[i];
2699b3fc5a1aSKonstantin Ananyev 		ol_flags = m->ol_flags;
2700b3fc5a1aSKonstantin Ananyev 
2701e8c838fdSMichal Krawczyk 		/* Check if any offload flag was set */
2702e8c838fdSMichal Krawczyk 		if (ol_flags == 0)
2703bc5ef57dSMichal Krawczyk 			continue;
2704bc5ef57dSMichal Krawczyk 
2705daa02b5cSOlivier Matz 		l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
2706e8c838fdSMichal Krawczyk 		/* SCTP checksum offload is not supported by the ENA. */
2707e8c838fdSMichal Krawczyk 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
2708daa02b5cSOlivier Matz 		    l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
2709e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2710e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "] has unsupported offload flags set: 0x%" PRIx64 "\n",
2711e8c838fdSMichal Krawczyk 				i, ol_flags);
2712baeed5f4SMichal Krawczyk 			rte_errno = ENOTSUP;
2713b3fc5a1aSKonstantin Ananyev 			return i;
2714b3fc5a1aSKonstantin Ananyev 		}
2715b3fc5a1aSKonstantin Ananyev 
271696ffa8a7SMichal Krawczyk 		if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
271796ffa8a7SMichal Krawczyk 		    !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
271896ffa8a7SMichal Krawczyk 		      m->nb_segs == tx_ring->sgl_size &&
271996ffa8a7SMichal Krawczyk 		      m->data_len < tx_ring->tx_max_header_size))) {
272096ffa8a7SMichal Krawczyk 			PMD_TX_LOG(DEBUG,
272196ffa8a7SMichal Krawczyk 				"mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
272296ffa8a7SMichal Krawczyk 				i, m->nb_segs);
272396ffa8a7SMichal Krawczyk 			rte_errno = EINVAL;
272496ffa8a7SMichal Krawczyk 			return i;
272596ffa8a7SMichal Krawczyk 		}
272696ffa8a7SMichal Krawczyk 
2727b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2728e8c838fdSMichal Krawczyk 		/* Check if requested offload is also enabled for the queue */
2729daa02b5cSOlivier Matz 		if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2730295968d1SFerruh Yigit 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
2731daa02b5cSOlivier Matz 		    (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
2732295968d1SFerruh Yigit 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
2733daa02b5cSOlivier Matz 		    (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
2734295968d1SFerruh Yigit 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
2735e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2736e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "]: requested offloads 0x%" PRIx64 " are not enabled for the queue[%u]\n",
2737e8c838fdSMichal Krawczyk 				i, ol_flags, tx_ring->id);
2738e8c838fdSMichal Krawczyk 			rte_errno = EINVAL;
2739e8c838fdSMichal Krawczyk 			return i;
2740e8c838fdSMichal Krawczyk 		}
2741e8c838fdSMichal Krawczyk 
2742e8c838fdSMichal Krawczyk 		/* The caller is obligated to set l2 and l3 len if any cksum
2743e8c838fdSMichal Krawczyk 		 * offload is enabled.
2744e8c838fdSMichal Krawczyk 		 */
2745daa02b5cSOlivier Matz 		if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
2746e8c838fdSMichal Krawczyk 		    (m->l2_len == 0 || m->l3_len == 0))) {
2747e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2748e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
2749e8c838fdSMichal Krawczyk 				i);
2750e8c838fdSMichal Krawczyk 			rte_errno = EINVAL;
2751e8c838fdSMichal Krawczyk 			return i;
2752e8c838fdSMichal Krawczyk 		}
2753b3fc5a1aSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
2754b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2755baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2756b3fc5a1aSKonstantin Ananyev 			return i;
2757b3fc5a1aSKonstantin Ananyev 		}
2758b3fc5a1aSKonstantin Ananyev #endif
275983277a7cSJakub Palider 
2760e8c838fdSMichal Krawczyk 		/* Verify HW support for requested offloads and determine if
2761e8c838fdSMichal Krawczyk 		 * pseudo header checksum is needed.
276283277a7cSJakub Palider 		 */
2763e8c838fdSMichal Krawczyk 		need_pseudo_csum = false;
2764daa02b5cSOlivier Matz 		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2765daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2766e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
2767e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2768e8c838fdSMichal Krawczyk 				return i;
2769e8c838fdSMichal Krawczyk 			}
277083277a7cSJakub Palider 
2771daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
2772e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_IPV4_TSO)) {
2773e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2774e8c838fdSMichal Krawczyk 				return i;
2775e8c838fdSMichal Krawczyk 			}
2776e8c838fdSMichal Krawczyk 
2777e8c838fdSMichal Krawczyk 			/* Check HW capabilities and if pseudo csum is needed
2778e8c838fdSMichal Krawczyk 			 * for L4 offloads.
2779e8c838fdSMichal Krawczyk 			 */
2780daa02b5cSOlivier Matz 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2781e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
2782e8c838fdSMichal Krawczyk 				if (dev_offload_capa &
2783e8c838fdSMichal Krawczyk 				    ENA_L4_IPV4_CSUM_PARTIAL) {
2784e8c838fdSMichal Krawczyk 					need_pseudo_csum = true;
2785e8c838fdSMichal Krawczyk 				} else {
2786e8c838fdSMichal Krawczyk 					rte_errno = ENOTSUP;
2787e8c838fdSMichal Krawczyk 					return i;
2788e8c838fdSMichal Krawczyk 				}
2789e8c838fdSMichal Krawczyk 			}
2790e8c838fdSMichal Krawczyk 
2791e8c838fdSMichal Krawczyk 			/* Parse the DF flag */
2792e8c838fdSMichal Krawczyk 			ip_hdr = rte_pktmbuf_mtod_offset(m,
2793e8c838fdSMichal Krawczyk 				struct rte_ipv4_hdr *, m->l2_len);
2794e8c838fdSMichal Krawczyk 			frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2795e8c838fdSMichal Krawczyk 			if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
2796e8c838fdSMichal Krawczyk 				m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2797daa02b5cSOlivier Matz 			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2798e8c838fdSMichal Krawczyk 				/* In case we are supposed to TSO and DF is
2799e8c838fdSMichal Krawczyk 				 * not set (DF=0), the hardware must be
2800e8c838fdSMichal Krawczyk 				 * provided with the partial checksum.
2801e8c838fdSMichal Krawczyk 				 */
2802e8c838fdSMichal Krawczyk 				need_pseudo_csum = true;
2803e8c838fdSMichal Krawczyk 			}
2804daa02b5cSOlivier Matz 		} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2805e8c838fdSMichal Krawczyk 			/* There is no support for IPv6 TSO for now. */
2806daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2807e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2808e8c838fdSMichal Krawczyk 				return i;
2809e8c838fdSMichal Krawczyk 			}
2810e8c838fdSMichal Krawczyk 
2811e8c838fdSMichal Krawczyk 			/* Check HW capabilities and if pseudo csum is needed */
2812daa02b5cSOlivier Matz 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2813e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
2814e8c838fdSMichal Krawczyk 				if (dev_offload_capa &
2815e8c838fdSMichal Krawczyk 				    ENA_L4_IPV6_CSUM_PARTIAL) {
2816e8c838fdSMichal Krawczyk 					need_pseudo_csum = true;
2817e8c838fdSMichal Krawczyk 				} else {
2818e8c838fdSMichal Krawczyk 					rte_errno = ENOTSUP;
2819e8c838fdSMichal Krawczyk 					return i;
2820e8c838fdSMichal Krawczyk 				}
2821e8c838fdSMichal Krawczyk 			}
2822e8c838fdSMichal Krawczyk 		}
2823e8c838fdSMichal Krawczyk 
2824e8c838fdSMichal Krawczyk 		if (need_pseudo_csum) {
2825e8c838fdSMichal Krawczyk 			ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
2826b3fc5a1aSKonstantin Ananyev 			if (ret != 0) {
2827baeed5f4SMichal Krawczyk 				rte_errno = -ret;
2828b3fc5a1aSKonstantin Ananyev 				return i;
2829b3fc5a1aSKonstantin Ananyev 			}
2830b3fc5a1aSKonstantin Ananyev 		}
2831e8c838fdSMichal Krawczyk 	}
2832b3fc5a1aSKonstantin Ananyev 
2833b3fc5a1aSKonstantin Ananyev 	return i;
2834b3fc5a1aSKonstantin Ananyev }
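
/*
 * Editorial usage sketch (application side): this callback backs
 * rte_eth_tx_prepare(), which should be called on offloaded bursts before
 * transmission so unsupported flag combinations are rejected early:
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * On failure, rte_errno holds the reason and the returned index points at
 * the first offending mbuf.
 */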
2835b3fc5a1aSKonstantin Ananyev 
2836f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter,
2837f01f060cSRafal Kozik 			     struct ena_admin_ena_hw_hints *hints)
2838f01f060cSRafal Kozik {
2839f01f060cSRafal Kozik 	if (hints->admin_completion_tx_timeout)
2840f01f060cSRafal Kozik 		adapter->ena_dev.admin_queue.completion_timeout =
2841f01f060cSRafal Kozik 			hints->admin_completion_tx_timeout * 1000;
2842f01f060cSRafal Kozik 
2843f01f060cSRafal Kozik 	if (hints->mmio_read_timeout)
2844f01f060cSRafal Kozik 		/* convert to usec */
2845f01f060cSRafal Kozik 		adapter->ena_dev.mmio_read.reg_read_to =
2846f01f060cSRafal Kozik 			hints->mmio_read_timeout * 1000;
2847d9b8b106SMichal Krawczyk 
2848d9b8b106SMichal Krawczyk 	if (hints->driver_watchdog_timeout) {
2849d9b8b106SMichal Krawczyk 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2850d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2851d9b8b106SMichal Krawczyk 		else
2852d9b8b106SMichal Krawczyk 			/* Convert msecs to ticks */
2853d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout =
2854d9b8b106SMichal Krawczyk 				(hints->driver_watchdog_timeout *
2855d9b8b106SMichal Krawczyk 				rte_get_timer_hz()) / 1000;
2856d9b8b106SMichal Krawczyk 	}
2857f01f060cSRafal Kozik }
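
/*
 * Editorial worked example: for a hypothetical hint of
 * driver_watchdog_timeout == 3000 (ms) on a machine where
 * rte_get_timer_hz() reports 2000000000 (2 GHz), the keep-alive timeout
 * becomes 3000 * 2000000000 / 1000 == 6000000000 timer ticks, i.e. 3 s.
 */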
2858f01f060cSRafal Kozik 
285936278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
286036278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info,
286136278b82SMichal Krawczyk 	struct rte_mbuf *mbuf,
286236278b82SMichal Krawczyk 	void **push_header,
286336278b82SMichal Krawczyk 	uint16_t *header_len)
286436278b82SMichal Krawczyk {
286536278b82SMichal Krawczyk 	struct ena_com_buf *ena_buf;
286636278b82SMichal Krawczyk 	uint16_t delta, seg_len, push_len;
286736278b82SMichal Krawczyk 
286836278b82SMichal Krawczyk 	delta = 0;
286936278b82SMichal Krawczyk 	seg_len = mbuf->data_len;
287036278b82SMichal Krawczyk 
287136278b82SMichal Krawczyk 	tx_info->mbuf = mbuf;
287236278b82SMichal Krawczyk 	ena_buf = tx_info->bufs;
287336278b82SMichal Krawczyk 
287436278b82SMichal Krawczyk 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
287536278b82SMichal Krawczyk 		/*
287636278b82SMichal Krawczyk 		 * The Tx header might be (and in most cases will be) smaller
287736278b82SMichal Krawczyk 		 * than tx_max_header_size. It is not an issue to send more
287836278b82SMichal Krawczyk 		 * data to the device than is actually needed when the mbuf is
287936278b82SMichal Krawczyk 		 * larger than tx_max_header_size.
288036278b82SMichal Krawczyk 		 */
288136278b82SMichal Krawczyk 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
288236278b82SMichal Krawczyk 		*header_len = push_len;
288336278b82SMichal Krawczyk 
288436278b82SMichal Krawczyk 		if (likely(push_len <= seg_len)) {
288536278b82SMichal Krawczyk 			/* If the push header is in the single segment, then
288636278b82SMichal Krawczyk 			 * just point it to the 1st mbuf data.
288736278b82SMichal Krawczyk 			 */
288836278b82SMichal Krawczyk 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
288936278b82SMichal Krawczyk 		} else {
289036278b82SMichal Krawczyk 			/* If the push header spans several segments, copy it
289136278b82SMichal Krawczyk 			 * to the intermediate buffer.
289236278b82SMichal Krawczyk 			 */
289336278b82SMichal Krawczyk 			rte_pktmbuf_read(mbuf, 0, push_len,
289436278b82SMichal Krawczyk 				tx_ring->push_buf_intermediate_buf);
289536278b82SMichal Krawczyk 			*push_header = tx_ring->push_buf_intermediate_buf;
289636278b82SMichal Krawczyk 			delta = push_len - seg_len;
289736278b82SMichal Krawczyk 		}
289836278b82SMichal Krawczyk 	} else {
289936278b82SMichal Krawczyk 		*push_header = NULL;
290036278b82SMichal Krawczyk 		*header_len = 0;
290136278b82SMichal Krawczyk 		push_len = 0;
290236278b82SMichal Krawczyk 	}
290336278b82SMichal Krawczyk 
290436278b82SMichal Krawczyk 	/* Process first segment taking into consideration pushed header */
290536278b82SMichal Krawczyk 	if (seg_len > push_len) {
290636278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova +
290736278b82SMichal Krawczyk 				mbuf->data_off +
290836278b82SMichal Krawczyk 				push_len;
290936278b82SMichal Krawczyk 		ena_buf->len = seg_len - push_len;
291036278b82SMichal Krawczyk 		ena_buf++;
291136278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
291236278b82SMichal Krawczyk 	}
291336278b82SMichal Krawczyk 
291436278b82SMichal Krawczyk 	while ((mbuf = mbuf->next) != NULL) {
291536278b82SMichal Krawczyk 		seg_len = mbuf->data_len;
291636278b82SMichal Krawczyk 
291736278b82SMichal Krawczyk 		/* Skip mbufs if whole data is pushed as a header */
291836278b82SMichal Krawczyk 		if (unlikely(delta > seg_len)) {
291936278b82SMichal Krawczyk 			delta -= seg_len;
292036278b82SMichal Krawczyk 			continue;
292136278b82SMichal Krawczyk 		}
292236278b82SMichal Krawczyk 
292336278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
292436278b82SMichal Krawczyk 		ena_buf->len = seg_len - delta;
292536278b82SMichal Krawczyk 		ena_buf++;
292636278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
292736278b82SMichal Krawczyk 
292836278b82SMichal Krawczyk 		delta = 0;
292936278b82SMichal Krawczyk 	}
293036278b82SMichal Krawczyk }
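
/*
 * Editorial worked example (LLQ mode, hypothetical sizes): for a two
 * segment mbuf of 64 + 1000 bytes with tx_max_header_size == 96:
 *
 *	push_len = RTE_MIN(1064, 96) = 96  >  seg_len = 64
 *
 * so the 96-byte push header is assembled in push_buf_intermediate_buf
 * from both segments and delta = 96 - 64 = 32. The first segment is then
 * fully consumed by the header, while the second one contributes its
 * remaining 1000 - 32 = 968 bytes through an ena_buf descriptor.
 */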
293136278b82SMichal Krawczyk 
293236278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
293336278b82SMichal Krawczyk {
293436278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info;
293536278b82SMichal Krawczyk 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
293636278b82SMichal Krawczyk 	uint16_t next_to_use;
293736278b82SMichal Krawczyk 	uint16_t header_len;
293836278b82SMichal Krawczyk 	uint16_t req_id;
293936278b82SMichal Krawczyk 	void *push_header;
294036278b82SMichal Krawczyk 	int nb_hw_desc;
294136278b82SMichal Krawczyk 	int rc;
294236278b82SMichal Krawczyk 
294396ffa8a7SMichal Krawczyk 	/* Check for space for 2 additional descriptors: one for a possible
294496ffa8a7SMichal Krawczyk 	 * header split and one for the metadata descriptor.
294596ffa8a7SMichal Krawczyk 	 */
294696ffa8a7SMichal Krawczyk 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
294796ffa8a7SMichal Krawczyk 					  mbuf->nb_segs + 2)) {
294896ffa8a7SMichal Krawczyk 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
294996ffa8a7SMichal Krawczyk 		return ENA_COM_NO_MEM;
295096ffa8a7SMichal Krawczyk 	}
295136278b82SMichal Krawczyk 
295236278b82SMichal Krawczyk 	next_to_use = tx_ring->next_to_use;
295336278b82SMichal Krawczyk 
295436278b82SMichal Krawczyk 	req_id = tx_ring->empty_tx_reqs[next_to_use];
295536278b82SMichal Krawczyk 	tx_info = &tx_ring->tx_buffer_info[req_id];
295636278b82SMichal Krawczyk 	tx_info->num_of_bufs = 0;
29573d47e9b1SMichal Krawczyk 	RTE_ASSERT(tx_info->mbuf == NULL);
295836278b82SMichal Krawczyk 
295936278b82SMichal Krawczyk 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
296036278b82SMichal Krawczyk 
296136278b82SMichal Krawczyk 	ena_tx_ctx.ena_bufs = tx_info->bufs;
296236278b82SMichal Krawczyk 	ena_tx_ctx.push_header = push_header;
296336278b82SMichal Krawczyk 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
296436278b82SMichal Krawczyk 	ena_tx_ctx.req_id = req_id;
296536278b82SMichal Krawczyk 	ena_tx_ctx.header_len = header_len;
296636278b82SMichal Krawczyk 
296736278b82SMichal Krawczyk 	/* Set Tx offload flags, if applicable */
296836278b82SMichal Krawczyk 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
296936278b82SMichal Krawczyk 		tx_ring->disable_meta_caching);
297036278b82SMichal Krawczyk 
297136278b82SMichal Krawczyk 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
297236278b82SMichal Krawczyk 			&ena_tx_ctx))) {
29730a001d69SMichal Krawczyk 		PMD_TX_LOG(DEBUG,
2974617898d1SMichal Krawczyk 			"LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
297536278b82SMichal Krawczyk 			tx_ring->id);
297636278b82SMichal Krawczyk 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
29771f949ad9SAmit Bernstein 		tx_ring->tx_stats.doorbells++;
29781d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = false;
297936278b82SMichal Krawczyk 	}
298036278b82SMichal Krawczyk 
298136278b82SMichal Krawczyk 	/* Prepare the packet's descriptors for the DMA engine */
298236278b82SMichal Krawczyk 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
298336278b82SMichal Krawczyk 		&nb_hw_desc);
298436278b82SMichal Krawczyk 	if (unlikely(rc)) {
2985b57e1053SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
298636278b82SMichal Krawczyk 		++tx_ring->tx_stats.prepare_ctx_err;
29872bae75eaSDawid Gorecki 		ena_trigger_reset(tx_ring->adapter,
29882bae75eaSDawid Gorecki 			ENA_REGS_RESET_DRIVER_INVALID_STATE);
298936278b82SMichal Krawczyk 		return rc;
299036278b82SMichal Krawczyk 	}
299136278b82SMichal Krawczyk 
299236278b82SMichal Krawczyk 	tx_info->tx_descs = nb_hw_desc;
2993f93e20e5SMichal Krawczyk 	tx_info->timestamp = rte_get_timer_cycles();
299436278b82SMichal Krawczyk 
299536278b82SMichal Krawczyk 	tx_ring->tx_stats.cnt++;
299636278b82SMichal Krawczyk 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
299736278b82SMichal Krawczyk 
299836278b82SMichal Krawczyk 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
299936278b82SMichal Krawczyk 		tx_ring->size_mask);
300036278b82SMichal Krawczyk 
300136278b82SMichal Krawczyk 	return 0;
300236278b82SMichal Krawczyk }
300336278b82SMichal Krawczyk 
3004a52b317eSDawid Gorecki static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
300536278b82SMichal Krawczyk {
3006a52b317eSDawid Gorecki 	struct ena_ring *tx_ring = (struct ena_ring *)txp;
300736278b82SMichal Krawczyk 	unsigned int total_tx_descs = 0;
3008a52b317eSDawid Gorecki 	unsigned int total_tx_pkts = 0;
3009005064e5SMichal Krawczyk 	uint16_t cleanup_budget;
301036278b82SMichal Krawczyk 	uint16_t next_to_clean = tx_ring->next_to_clean;
301136278b82SMichal Krawczyk 
3012a52b317eSDawid Gorecki 	/*
3013a52b317eSDawid Gorecki 	 * If free_pkt_cnt is equal to 0, it means that the user requested
3014a52b317eSDawid Gorecki 	 * full cleanup, so attempt to release all Tx descriptors
3015a52b317eSDawid Gorecki 	 * (ring_size - 1 -> size_mask)
3016a52b317eSDawid Gorecki 	 */
3017a52b317eSDawid Gorecki 	cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt;
301836278b82SMichal Krawczyk 
3019a52b317eSDawid Gorecki 	while (likely(total_tx_pkts < cleanup_budget)) {
302036278b82SMichal Krawczyk 		struct rte_mbuf *mbuf;
302136278b82SMichal Krawczyk 		struct ena_tx_buffer *tx_info;
302236278b82SMichal Krawczyk 		uint16_t req_id;
302336278b82SMichal Krawczyk 
302436278b82SMichal Krawczyk 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
302536278b82SMichal Krawczyk 			break;
302636278b82SMichal Krawczyk 
302736278b82SMichal Krawczyk 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
302836278b82SMichal Krawczyk 			break;
302936278b82SMichal Krawczyk 
303036278b82SMichal Krawczyk 		/* Get Tx info & store how many descs were processed */
303136278b82SMichal Krawczyk 		tx_info = &tx_ring->tx_buffer_info[req_id];
3032f93e20e5SMichal Krawczyk 		tx_info->timestamp = 0;
303336278b82SMichal Krawczyk 
303436278b82SMichal Krawczyk 		mbuf = tx_info->mbuf;
303536278b82SMichal Krawczyk 		rte_pktmbuf_free(mbuf);
303636278b82SMichal Krawczyk 
303736278b82SMichal Krawczyk 		tx_info->mbuf = NULL;
303836278b82SMichal Krawczyk 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
303936278b82SMichal Krawczyk 
304036278b82SMichal Krawczyk 		total_tx_descs += tx_info->tx_descs;
3041a52b317eSDawid Gorecki 		total_tx_pkts++;
304236278b82SMichal Krawczyk 
304336278b82SMichal Krawczyk 		/* Put back descriptor to the ring for reuse */
304436278b82SMichal Krawczyk 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
304536278b82SMichal Krawczyk 			tx_ring->size_mask);
304636278b82SMichal Krawczyk 	}
304736278b82SMichal Krawczyk 
304836278b82SMichal Krawczyk 	if (likely(total_tx_descs > 0)) {
304936278b82SMichal Krawczyk 		/* acknowledge completion of sent packets */
305036278b82SMichal Krawczyk 		tx_ring->next_to_clean = next_to_clean;
305136278b82SMichal Krawczyk 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
305236278b82SMichal Krawczyk 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
305336278b82SMichal Krawczyk 	}
3054f93e20e5SMichal Krawczyk 
3055a52b317eSDawid Gorecki 	/* Record the time of the last full cleanup for the Tx completion watchdog */
3056a52b317eSDawid Gorecki 	if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget)
3057f93e20e5SMichal Krawczyk 		tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
3058a52b317eSDawid Gorecki 
3059a52b317eSDawid Gorecki 	return total_tx_pkts;
306036278b82SMichal Krawczyk }
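
/*
 * Note: ena_tx_cleanup() above matches the eth_tx_done_cleanup_t prototype,
 * so besides the internal call from eth_ena_xmit_pkts() it should be
 * reachable through the generic ethdev API, assuming it is wired up as the
 * .tx_done_cleanup dev op. Illustrative usage from an application:
 *
 *	// Reclaim up to 32 completed Tx mbufs on port 0, queue 0
 *	int n = rte_eth_tx_done_cleanup(0, 0, 32);
 */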
306136278b82SMichal Krawczyk 
30621173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
30631173fca2SJan Medala 				  uint16_t nb_pkts)
30641173fca2SJan Medala {
30651173fca2SJan Medala 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
3066005064e5SMichal Krawczyk 	int available_desc;
306774456796SMichal Krawczyk 	uint16_t sent_idx = 0;
30681173fca2SJan Medala 
30690a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_TX
30701173fca2SJan Medala 	/* Check adapter state */
30711173fca2SJan Medala 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
30720a001d69SMichal Krawczyk 		PMD_TX_LOG(ALERT,
30731173fca2SJan Medala 			"Trying to xmit pkts while device is NOT running\n");
30741173fca2SJan Medala 		return 0;
30751173fca2SJan Medala 	}
30760a001d69SMichal Krawczyk #endif
30771173fca2SJan Medala 
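	/* Reclaim completed Tx descriptors proactively when the number of free
	 * SQ entries drops below the configured threshold, so that the burst
	 * below has room to be posted.
	 */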
307867216c31SMichal Krawczyk 	available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
307967216c31SMichal Krawczyk 	if (available_desc < tx_ring->tx_free_thresh)
3080a52b317eSDawid Gorecki 		ena_tx_cleanup((void *)tx_ring, 0);
308167216c31SMichal Krawczyk 
30821173fca2SJan Medala 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
308336278b82SMichal Krawczyk 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
30842061fe41SRafal Kozik 			break;
30851d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = true;
308636278b82SMichal Krawczyk 		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
308736278b82SMichal Krawczyk 			tx_ring->size_mask)]);
30882fca2a98SMichal Krawczyk 	}
30892fca2a98SMichal Krawczyk 
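	/* The doorbell is rung at most once per burst rather than once per
	 * packet to limit MMIO writes; pkts_without_db tracks whether any
	 * descriptors were queued since the last doorbell.
	 */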
30905e02e19eSJan Medala 	/* If there are ready packets to be xmitted... */
30911d973d8fSIgor Chauskin 	if (likely(tx_ring->pkts_without_db)) {
30925e02e19eSJan Medala 		/* ...let HW do its best :-) */
30931173fca2SJan Medala 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
309445b6d861SMichal Krawczyk 		tx_ring->tx_stats.doorbells++;
30951d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = false;
30965e02e19eSJan Medala 	}
30975e02e19eSJan Medala 
30987830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
3099b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
31007830e905SSolganik Alexander 	tx_ring->tx_stats.tx_poll++;
31017830e905SSolganik Alexander 
31021173fca2SJan Medala 	return sent_idx;
31031173fca2SJan Medala }
31041173fca2SJan Medala 
3105e3595539SStanislaw Kardach int ena_copy_eni_stats(struct ena_adapter *adapter, struct ena_stats_eni *stats)
310645718adaSMichal Krawczyk {
310745718adaSMichal Krawczyk 	int rc;
310845718adaSMichal Krawczyk 
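	/* ENA_PROXY executes the ena_com call directly in the primary process
	 * and forwards it as an IPC request to the primary when invoked from
	 * a secondary process (see ena_mp_primary_handle() at the end of this
	 * file).
	 */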
310945718adaSMichal Krawczyk 	rte_spinlock_lock(&adapter->admin_lock);
3110e3595539SStanislaw Kardach 	/* Retrieve and store the latest statistics from the AQ. This ensures
3111e3595539SStanislaw Kardach 	 * that the previous values are returned in case of a com error.
3112e3595539SStanislaw Kardach 	 */
3113e3595539SStanislaw Kardach 	rc = ENA_PROXY(adapter, ena_com_get_eni_stats, &adapter->ena_dev,
3114e3595539SStanislaw Kardach 		(struct ena_admin_eni_stats *)stats);
311545718adaSMichal Krawczyk 	rte_spinlock_unlock(&adapter->admin_lock);
311645718adaSMichal Krawczyk 	if (rc != 0) {
311745718adaSMichal Krawczyk 		if (rc == ENA_COM_UNSUPPORTED) {
311845718adaSMichal Krawczyk 			PMD_DRV_LOG(DEBUG,
3119617898d1SMichal Krawczyk 				"Retrieving ENI metrics is not supported\n");
312045718adaSMichal Krawczyk 		} else {
312145718adaSMichal Krawczyk 			PMD_DRV_LOG(WARNING,
3122617898d1SMichal Krawczyk 				"Failed to get ENI metrics, rc: %d\n", rc);
312345718adaSMichal Krawczyk 		}
312445718adaSMichal Krawczyk 		return rc;
312545718adaSMichal Krawczyk 	}
312645718adaSMichal Krawczyk 
312745718adaSMichal Krawczyk 	return 0;
312845718adaSMichal Krawczyk }
312945718adaSMichal Krawczyk 
31307830e905SSolganik Alexander /**
31317830e905SSolganik Alexander  * DPDK callback to retrieve names of extended device statistics
31327830e905SSolganik Alexander  *
31337830e905SSolganik Alexander  * @param dev
31347830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
31357830e905SSolganik Alexander  * @param[out] xstats_names
31367830e905SSolganik Alexander  *   Buffer to insert names into.
31377830e905SSolganik Alexander  * @param n
31387830e905SSolganik Alexander  *   Number of names.
31397830e905SSolganik Alexander  *
31407830e905SSolganik Alexander  * @return
31417830e905SSolganik Alexander  *   Number of xstats names.
31427830e905SSolganik Alexander  */
31437830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
31447830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
31457830e905SSolganik Alexander 				unsigned int n)
31467830e905SSolganik Alexander {
3147aab58857SStanislaw Kardach 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
31487830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
31497830e905SSolganik Alexander 
31507830e905SSolganik Alexander 	if (n < xstats_count || !xstats_names)
31517830e905SSolganik Alexander 		return xstats_count;
31527830e905SSolganik Alexander 
31537830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
31547830e905SSolganik Alexander 		strcpy(xstats_names[count].name,
31557830e905SSolganik Alexander 			ena_stats_global_strings[stat].name);
31567830e905SSolganik Alexander 
315745718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++)
315845718adaSMichal Krawczyk 		strcpy(xstats_names[count].name,
315945718adaSMichal Krawczyk 			ena_stats_eni_strings[stat].name);
316045718adaSMichal Krawczyk 
31617830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
31627830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
31637830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
31647830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
31657830e905SSolganik Alexander 				"rx_q%d_%s", i,
31667830e905SSolganik Alexander 				ena_stats_rx_strings[stat].name);
31677830e905SSolganik Alexander 
31687830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
31697830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
31707830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
31717830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
31727830e905SSolganik Alexander 				"tx_q%d_%s", i,
31737830e905SSolganik Alexander 				ena_stats_tx_strings[stat].name);
31747830e905SSolganik Alexander 
31757830e905SSolganik Alexander 	return xstats_count;
31767830e905SSolganik Alexander }
31777830e905SSolganik Alexander 
31787830e905SSolganik Alexander /**
31793cec73faSMichal Krawczyk  * DPDK callback to retrieve names of extended device statistics for the given
31803cec73faSMichal Krawczyk  * ids.
31813cec73faSMichal Krawczyk  *
31823cec73faSMichal Krawczyk  * @param dev
31833cec73faSMichal Krawczyk  *   Pointer to Ethernet device structure.
31843cec73faSMichal Krawczyk  * @param[out] xstats_names
31853cec73faSMichal Krawczyk  *   Buffer to insert names into.
31863cec73faSMichal Krawczyk  * @param ids
31873cec73faSMichal Krawczyk  *   IDs array for which the names should be retrieved.
31883cec73faSMichal Krawczyk  * @param size
31893cec73faSMichal Krawczyk  *   Number of ids.
31903cec73faSMichal Krawczyk  *
31913cec73faSMichal Krawczyk  * @return
31923cec73faSMichal Krawczyk  *   Positive value: number of xstats names. Negative value: error code.
31933cec73faSMichal Krawczyk  */
31943cec73faSMichal Krawczyk static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
31953cec73faSMichal Krawczyk 				      const uint64_t *ids,
31963cec73faSMichal Krawczyk 				      struct rte_eth_xstat_name *xstats_names,
31973cec73faSMichal Krawczyk 				      unsigned int size)
31983cec73faSMichal Krawczyk {
31993cec73faSMichal Krawczyk 	uint64_t xstats_count = ena_xstats_calc_num(dev->data);
32003cec73faSMichal Krawczyk 	uint64_t id, qid;
32013cec73faSMichal Krawczyk 	unsigned int i;
32023cec73faSMichal Krawczyk 
32033cec73faSMichal Krawczyk 	if (xstats_names == NULL)
32043cec73faSMichal Krawczyk 		return xstats_count;
32053cec73faSMichal Krawczyk 
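	/* The ids follow the layout produced by ena_xstats_get_names(): global
	 * stats first, then ENI stats, then the per-queue Rx stats with the
	 * queue index varying fastest (i.e. within the Rx block
	 * id = stat * nb_rx_queues + qid), and finally the per-queue Tx stats.
	 */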
32063cec73faSMichal Krawczyk 	for (i = 0; i < size; ++i) {
32073cec73faSMichal Krawczyk 		id = ids[i];
32083cec73faSMichal Krawczyk 		if (id >= xstats_count) {
32093cec73faSMichal Krawczyk 			PMD_DRV_LOG(ERR,
32103cec73faSMichal Krawczyk 				"ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
32113cec73faSMichal Krawczyk 				 id, xstats_count);
32123cec73faSMichal Krawczyk 			return -EINVAL;
32133cec73faSMichal Krawczyk 		}
32143cec73faSMichal Krawczyk 
32153cec73faSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_GLOBAL) {
32163cec73faSMichal Krawczyk 			strcpy(xstats_names[i].name,
32173cec73faSMichal Krawczyk 			       ena_stats_global_strings[id].name);
32183cec73faSMichal Krawczyk 			continue;
32193cec73faSMichal Krawczyk 		}
32203cec73faSMichal Krawczyk 
32213cec73faSMichal Krawczyk 		id -= ENA_STATS_ARRAY_GLOBAL;
32223cec73faSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_ENI) {
32233cec73faSMichal Krawczyk 			strcpy(xstats_names[i].name,
32243cec73faSMichal Krawczyk 			       ena_stats_eni_strings[id].name);
32253cec73faSMichal Krawczyk 			continue;
32263cec73faSMichal Krawczyk 		}
32273cec73faSMichal Krawczyk 
32283cec73faSMichal Krawczyk 		id -= ENA_STATS_ARRAY_ENI;
32293cec73faSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues) {
32303cec73faSMichal Krawczyk 			qid = id % dev->data->nb_rx_queues;
32313cec73faSMichal Krawczyk 			id /= dev->data->nb_rx_queues;
32323cec73faSMichal Krawczyk 			snprintf(xstats_names[i].name,
32333cec73faSMichal Krawczyk 				 sizeof(xstats_names[i].name),
32343cec73faSMichal Krawczyk 				 "rx_q%" PRIu64 "_%s",
32353cec73faSMichal Krawczyk 				 qid, ena_stats_rx_strings[id].name);
32363cec73faSMichal Krawczyk 			continue;
32373cec73faSMichal Krawczyk 		}
32383cec73faSMichal Krawczyk 
32393cec73faSMichal Krawczyk 		id -= ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
32403cec73faSMichal Krawczyk 		/* Although this condition is not strictly needed, it is kept
32413cec73faSMichal Krawczyk 		 * for forward compatibility in case a new xstats group is
32423cec73faSMichal Krawczyk 		 * ever added.
32433cec73faSMichal Krawczyk 		 */
32443cec73faSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues) {
32453cec73faSMichal Krawczyk 			qid = id % dev->data->nb_tx_queues;
32463cec73faSMichal Krawczyk 			id /= dev->data->nb_tx_queues;
32473cec73faSMichal Krawczyk 			snprintf(xstats_names[i].name,
32483cec73faSMichal Krawczyk 				 sizeof(xstats_names[i].name),
32493cec73faSMichal Krawczyk 				 "tx_q%" PRIu64 "_%s",
32503cec73faSMichal Krawczyk 				 qid, ena_stats_tx_strings[id].name);
32513cec73faSMichal Krawczyk 			continue;
32513cec73faSMichal Krawczyk 		}
32523cec73faSMichal Krawczyk 	}
32533cec73faSMichal Krawczyk 
32543cec73faSMichal Krawczyk 	return i;
32553cec73faSMichal Krawczyk }
32563cec73faSMichal Krawczyk 
32573cec73faSMichal Krawczyk /**
32587830e905SSolganik Alexander  * DPDK callback to get extended device statistics.
32597830e905SSolganik Alexander  *
32607830e905SSolganik Alexander  * @param dev
32617830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
32627830e905SSolganik Alexander  * @param[out] stats
32637830e905SSolganik Alexander  *   Stats table output buffer.
32647830e905SSolganik Alexander  * @param n
32657830e905SSolganik Alexander  *   The size of the stats table.
32667830e905SSolganik Alexander  *
32677830e905SSolganik Alexander  * @return
32687830e905SSolganik Alexander  *   Number of xstats on success, negative on failure.
32697830e905SSolganik Alexander  */
32707830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
32717830e905SSolganik Alexander 			  struct rte_eth_xstat *xstats,
32727830e905SSolganik Alexander 			  unsigned int n)
32737830e905SSolganik Alexander {
3274890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
3275aab58857SStanislaw Kardach 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
3276e3595539SStanislaw Kardach 	struct ena_stats_eni eni_stats;
32777830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
32787830e905SSolganik Alexander 	int stat_offset;
32797830e905SSolganik Alexander 	void *stats_begin;
32807830e905SSolganik Alexander 
32817830e905SSolganik Alexander 	if (n < xstats_count)
32827830e905SSolganik Alexander 		return xstats_count;
32837830e905SSolganik Alexander 
32847830e905SSolganik Alexander 	if (!xstats)
32857830e905SSolganik Alexander 		return 0;
32867830e905SSolganik Alexander 
32877830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
3288493107fdSMichal Krawczyk 		stat_offset = ena_stats_global_strings[stat].stat_offset;
32897830e905SSolganik Alexander 		stats_begin = &adapter->dev_stats;
32907830e905SSolganik Alexander 
32917830e905SSolganik Alexander 		xstats[count].id = count;
32927830e905SSolganik Alexander 		xstats[count].value = *((uint64_t *)
32937830e905SSolganik Alexander 			((char *)stats_begin + stat_offset));
32947830e905SSolganik Alexander 	}
32957830e905SSolganik Alexander 
329645718adaSMichal Krawczyk 	/* Even if the function below fails, we should copy the previous (or
329745718adaSMichal Krawczyk 	 * initial) values to keep the rte_eth_xstat structure consistent.
329845718adaSMichal Krawczyk 	 */
3299e3595539SStanislaw Kardach 	ena_copy_eni_stats(adapter, &eni_stats);
330045718adaSMichal Krawczyk 	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
330145718adaSMichal Krawczyk 		stat_offset = ena_stats_eni_strings[stat].stat_offset;
3302e3595539SStanislaw Kardach 		stats_begin = &eni_stats;
330345718adaSMichal Krawczyk 
330445718adaSMichal Krawczyk 		xstats[count].id = count;
330545718adaSMichal Krawczyk 		xstats[count].value = *((uint64_t *)
330645718adaSMichal Krawczyk 		    ((char *)stats_begin + stat_offset));
330745718adaSMichal Krawczyk 	}
330845718adaSMichal Krawczyk 
33097830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
33107830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
33117830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
33127830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
33137830e905SSolganik Alexander 
33147830e905SSolganik Alexander 			xstats[count].id = count;
33157830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
33167830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
33177830e905SSolganik Alexander 		}
33187830e905SSolganik Alexander 	}
33197830e905SSolganik Alexander 
33207830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
33217830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
33227830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
33237830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
33247830e905SSolganik Alexander 
33257830e905SSolganik Alexander 			xstats[count].id = count;
33267830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
33277830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
33287830e905SSolganik Alexander 		}
33297830e905SSolganik Alexander 	}
33307830e905SSolganik Alexander 
33317830e905SSolganik Alexander 	return count;
33327830e905SSolganik Alexander }
33337830e905SSolganik Alexander 
33347830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
33357830e905SSolganik Alexander 				const uint64_t *ids,
33367830e905SSolganik Alexander 				uint64_t *values,
33377830e905SSolganik Alexander 				unsigned int n)
33387830e905SSolganik Alexander {
3339890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
3340e3595539SStanislaw Kardach 	struct ena_stats_eni eni_stats;
33417830e905SSolganik Alexander 	uint64_t id;
33427830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
33437830e905SSolganik Alexander 	unsigned int i;
33447830e905SSolganik Alexander 	int qid;
33457830e905SSolganik Alexander 	int valid = 0;
334645718adaSMichal Krawczyk 	bool was_eni_copied = false;
334745718adaSMichal Krawczyk 
33487830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
33497830e905SSolganik Alexander 		id = ids[i];
33507830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
33517830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
33527830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
33537830e905SSolganik Alexander 			++valid;
33547830e905SSolganik Alexander 			continue;
33557830e905SSolganik Alexander 		}
33567830e905SSolganik Alexander 
335745718adaSMichal Krawczyk 		/* Check if id belongs to ENI statistics */
33587830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
335945718adaSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_ENI) {
336045718adaSMichal Krawczyk 			/* Avoid reading ENI stats multiple times in a single
336145718adaSMichal Krawczyk 			 * function call, as it requires communication with the
336245718adaSMichal Krawczyk 			 * admin queue.
336345718adaSMichal Krawczyk 			 */
336445718adaSMichal Krawczyk 			if (!was_eni_copied) {
336545718adaSMichal Krawczyk 				was_eni_copied = true;
3366e3595539SStanislaw Kardach 				ena_copy_eni_stats(adapter, &eni_stats);
336745718adaSMichal Krawczyk 			}
3368e3595539SStanislaw Kardach 			values[i] = *((uint64_t *)&eni_stats + id);
336945718adaSMichal Krawczyk 			++valid;
337045718adaSMichal Krawczyk 			continue;
337145718adaSMichal Krawczyk 		}
337245718adaSMichal Krawczyk 
337345718adaSMichal Krawczyk 		/* Check if id belongs to rx queue statistics */
337445718adaSMichal Krawczyk 		id -= ENA_STATS_ARRAY_ENI;
33757830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
33767830e905SSolganik Alexander 		if (id < rx_entries) {
33777830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
33787830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
33797830e905SSolganik Alexander 			values[i] = *((uint64_t *)
33807830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
33817830e905SSolganik Alexander 			++valid;
33827830e905SSolganik Alexander 			continue;
33837830e905SSolganik Alexander 		}
33847830e905SSolganik Alexander 		/* Check if id belongs to tx queue statistics */
33857830e905SSolganik Alexander 		id -= rx_entries;
33867830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
33877830e905SSolganik Alexander 		if (id < tx_entries) {
33887830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
33897830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
33907830e905SSolganik Alexander 			values[i] = *((uint64_t *)
33917830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
33927830e905SSolganik Alexander 			++valid;
33937830e905SSolganik Alexander 			continue;
33947830e905SSolganik Alexander 		}
33957830e905SSolganik Alexander 	}
33967830e905SSolganik Alexander 
33977830e905SSolganik Alexander 	return valid;
33987830e905SSolganik Alexander }
33997830e905SSolganik Alexander 
3400cc0c5d25SMichal Krawczyk static int ena_process_uint_devarg(const char *key,
3401cc0c5d25SMichal Krawczyk 				  const char *value,
3402cc0c5d25SMichal Krawczyk 				  void *opaque)
3403cc0c5d25SMichal Krawczyk {
3404cc0c5d25SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
3405cc0c5d25SMichal Krawczyk 	char *str_end;
3406cc0c5d25SMichal Krawczyk 	uint64_t uint_value;
3407cc0c5d25SMichal Krawczyk 
3408cc0c5d25SMichal Krawczyk 	uint_value = strtoull(value, &str_end, 10);
3409cc0c5d25SMichal Krawczyk 	if (value == str_end) {
3410cc0c5d25SMichal Krawczyk 		PMD_INIT_LOG(ERR,
3411cc0c5d25SMichal Krawczyk 			"Invalid value for key '%s'. Only uint values are accepted.\n",
3412cc0c5d25SMichal Krawczyk 			key);
3413cc0c5d25SMichal Krawczyk 		return -EINVAL;
3414cc0c5d25SMichal Krawczyk 	}
3415cc0c5d25SMichal Krawczyk 
3416cc0c5d25SMichal Krawczyk 	if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
3417cc0c5d25SMichal Krawczyk 		if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
3418cc0c5d25SMichal Krawczyk 			PMD_INIT_LOG(ERR,
3419cc0c5d25SMichal Krawczyk 				"Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n",
3420cc0c5d25SMichal Krawczyk 				uint_value, ENA_MAX_TX_TIMEOUT_SECONDS);
3421cc0c5d25SMichal Krawczyk 			return -EINVAL;
3422cc0c5d25SMichal Krawczyk 		} else if (uint_value == 0) {
3423cc0c5d25SMichal Krawczyk 			PMD_INIT_LOG(INFO,
3424cc0c5d25SMichal Krawczyk 				"The check for missing Tx completions has been disabled.\n");
3425cc0c5d25SMichal Krawczyk 			adapter->missing_tx_completion_to =
3426cc0c5d25SMichal Krawczyk 				ENA_HW_HINTS_NO_TIMEOUT;
3427cc0c5d25SMichal Krawczyk 		} else {
3428cc0c5d25SMichal Krawczyk 			PMD_INIT_LOG(INFO,
3429cc0c5d25SMichal Krawczyk 				"Tx packet completion timeout set to %" PRIu64 " seconds.\n",
3430cc0c5d25SMichal Krawczyk 				uint_value);
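			/* Store the timeout converted from seconds to timer cycles. */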
3431cc0c5d25SMichal Krawczyk 			adapter->missing_tx_completion_to =
3432cc0c5d25SMichal Krawczyk 				uint_value * rte_get_timer_hz();
3433cc0c5d25SMichal Krawczyk 		}
3434cc0c5d25SMichal Krawczyk 	}
3435cc0c5d25SMichal Krawczyk 
3436cc0c5d25SMichal Krawczyk 	return 0;
3437cc0c5d25SMichal Krawczyk }
3438cc0c5d25SMichal Krawczyk 
34398a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key,
34408a7a73f2SMichal Krawczyk 				   const char *value,
34418a7a73f2SMichal Krawczyk 				   void *opaque)
34428a7a73f2SMichal Krawczyk {
34438a7a73f2SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
34448a7a73f2SMichal Krawczyk 	bool bool_value;
34458a7a73f2SMichal Krawczyk 
34468a7a73f2SMichal Krawczyk 	/* Parse the value. */
34478a7a73f2SMichal Krawczyk 	if (strcmp(value, "1") == 0) {
34488a7a73f2SMichal Krawczyk 		bool_value = true;
34498a7a73f2SMichal Krawczyk 	} else if (strcmp(value, "0") == 0) {
34508a7a73f2SMichal Krawczyk 		bool_value = false;
34518a7a73f2SMichal Krawczyk 	} else {
34528a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR,
34538a7a73f2SMichal Krawczyk 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
34548a7a73f2SMichal Krawczyk 			value, key);
34558a7a73f2SMichal Krawczyk 		return -EINVAL;
34568a7a73f2SMichal Krawczyk 	}
34578a7a73f2SMichal Krawczyk 
34588a7a73f2SMichal Krawczyk 	/* Now, assign it to the proper adapter field. */
34599b312ad3SIgor Chauskin 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
34608a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr = bool_value;
34618a7a73f2SMichal Krawczyk 
34628a7a73f2SMichal Krawczyk 	return 0;
34638a7a73f2SMichal Krawczyk }
34648a7a73f2SMichal Krawczyk 
34658a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter,
34668a7a73f2SMichal Krawczyk 			     struct rte_devargs *devargs)
34678a7a73f2SMichal Krawczyk {
34688a7a73f2SMichal Krawczyk 	static const char * const allowed_args[] = {
34698a7a73f2SMichal Krawczyk 		ENA_DEVARG_LARGE_LLQ_HDR,
3470cc0c5d25SMichal Krawczyk 		ENA_DEVARG_MISS_TXC_TO,
34719f220a95SMichal Krawczyk 		NULL,
34728a7a73f2SMichal Krawczyk 	};
34738a7a73f2SMichal Krawczyk 	struct rte_kvargs *kvlist;
34748a7a73f2SMichal Krawczyk 	int rc;
34758a7a73f2SMichal Krawczyk 
34768a7a73f2SMichal Krawczyk 	if (devargs == NULL)
34778a7a73f2SMichal Krawczyk 		return 0;
34788a7a73f2SMichal Krawczyk 
34798a7a73f2SMichal Krawczyk 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
34808a7a73f2SMichal Krawczyk 	if (kvlist == NULL) {
34818a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
34828a7a73f2SMichal Krawczyk 			devargs->args);
34838a7a73f2SMichal Krawczyk 		return -EINVAL;
34848a7a73f2SMichal Krawczyk 	}
34858a7a73f2SMichal Krawczyk 
34868a7a73f2SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
34878a7a73f2SMichal Krawczyk 		ena_process_bool_devarg, adapter);
3488cc0c5d25SMichal Krawczyk 	if (rc != 0)
3489cc0c5d25SMichal Krawczyk 		goto exit;
3490cc0c5d25SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO,
3491cc0c5d25SMichal Krawczyk 		ena_process_uint_devarg, adapter);
34928a7a73f2SMichal Krawczyk 
3493cc0c5d25SMichal Krawczyk exit:
34948a7a73f2SMichal Krawczyk 	rte_kvargs_free(kvlist);
34958a7a73f2SMichal Krawczyk 
34968a7a73f2SMichal Krawczyk 	return rc;
34978a7a73f2SMichal Krawczyk }
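
/*
 * Illustrative devargs usage (the PCI address is an example):
 *   dpdk-testpmd -a 00:06.0,large_llq_hdr=1,miss_txc_to=5 -- -i
 */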
34988a7a73f2SMichal Krawczyk 
34996986cdc4SMichal Krawczyk static int ena_setup_rx_intr(struct rte_eth_dev *dev)
35006986cdc4SMichal Krawczyk {
35016986cdc4SMichal Krawczyk 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3502d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
35036986cdc4SMichal Krawczyk 	int rc;
35046986cdc4SMichal Krawczyk 	uint16_t vectors_nb, i;
35056986cdc4SMichal Krawczyk 	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
35066986cdc4SMichal Krawczyk 
35076986cdc4SMichal Krawczyk 	if (!rx_intr_requested)
35086986cdc4SMichal Krawczyk 		return 0;
35096986cdc4SMichal Krawczyk 
35106986cdc4SMichal Krawczyk 	if (!rte_intr_cap_multiple(intr_handle)) {
35116986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
35126986cdc4SMichal Krawczyk 			"Rx interrupt requested, but it isn't supported by the PCI driver\n");
35136986cdc4SMichal Krawczyk 		return -ENOTSUP;
35146986cdc4SMichal Krawczyk 	}
35156986cdc4SMichal Krawczyk 
35166986cdc4SMichal Krawczyk 	/* Disable interrupt mapping before the configuration starts. */
35176986cdc4SMichal Krawczyk 	rte_intr_disable(intr_handle);
35186986cdc4SMichal Krawczyk 
35196986cdc4SMichal Krawczyk 	/* Verify if there are enough vectors available. */
35206986cdc4SMichal Krawczyk 	vectors_nb = dev->data->nb_rx_queues;
35216986cdc4SMichal Krawczyk 	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
35226986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
35236986cdc4SMichal Krawczyk 			"Too many Rx interrupts requested, maximum number: %d\n",
35246986cdc4SMichal Krawczyk 			RTE_MAX_RXTX_INTR_VEC_ID);
35256986cdc4SMichal Krawczyk 		rc = -ENOTSUP;
35266986cdc4SMichal Krawczyk 		goto enable_intr;
35276986cdc4SMichal Krawczyk 	}
35286986cdc4SMichal Krawczyk 
3529d61138d4SHarman Kalra 	/* Allocate the vector list */
3530d61138d4SHarman Kalra 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
3531d61138d4SHarman Kalra 					   dev->data->nb_rx_queues)) {
35326986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
35336986cdc4SMichal Krawczyk 			"Failed to allocate interrupt vector for %d queues\n",
35346986cdc4SMichal Krawczyk 			dev->data->nb_rx_queues);
35356986cdc4SMichal Krawczyk 		rc = -ENOMEM;
35366986cdc4SMichal Krawczyk 		goto enable_intr;
35376986cdc4SMichal Krawczyk 	}
35386986cdc4SMichal Krawczyk 
35396986cdc4SMichal Krawczyk 	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
35406986cdc4SMichal Krawczyk 	if (rc != 0)
35416986cdc4SMichal Krawczyk 		goto free_intr_vec;
35426986cdc4SMichal Krawczyk 
35436986cdc4SMichal Krawczyk 	if (!rte_intr_allow_others(intr_handle)) {
35446986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
35456986cdc4SMichal Krawczyk 			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
35466986cdc4SMichal Krawczyk 		goto disable_intr_efd;
35476986cdc4SMichal Krawczyk 	}
35486986cdc4SMichal Krawczyk 
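	/* Vector 0 stays with the ENA admin queue (AENQ) interrupt, hence the
	 * Rx queues are mapped starting from RTE_INTR_VEC_RXTX_OFFSET.
	 */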
35496986cdc4SMichal Krawczyk 	for (i = 0; i < vectors_nb; ++i)
3550d61138d4SHarman Kalra 		if (rte_intr_vec_list_index_set(intr_handle, i,
3551d61138d4SHarman Kalra 					   RTE_INTR_VEC_RXTX_OFFSET + i))
3552d61138d4SHarman Kalra 			goto disable_intr_efd;
35536986cdc4SMichal Krawczyk 
35546986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
35556986cdc4SMichal Krawczyk 	return 0;
35566986cdc4SMichal Krawczyk 
35576986cdc4SMichal Krawczyk disable_intr_efd:
35586986cdc4SMichal Krawczyk 	rte_intr_efd_disable(intr_handle);
35596986cdc4SMichal Krawczyk free_intr_vec:
3560d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
35616986cdc4SMichal Krawczyk enable_intr:
35626986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
35636986cdc4SMichal Krawczyk 	return rc;
35646986cdc4SMichal Krawczyk }
35656986cdc4SMichal Krawczyk 
35666986cdc4SMichal Krawczyk static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
35676986cdc4SMichal Krawczyk 				 uint16_t queue_id,
35686986cdc4SMichal Krawczyk 				 bool unmask)
35696986cdc4SMichal Krawczyk {
35706986cdc4SMichal Krawczyk 	struct ena_adapter *adapter = dev->data->dev_private;
35716986cdc4SMichal Krawczyk 	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
35726986cdc4SMichal Krawczyk 	struct ena_eth_io_intr_reg intr_reg;
35736986cdc4SMichal Krawczyk 
35746986cdc4SMichal Krawczyk 	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask);
35756986cdc4SMichal Krawczyk 	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
35766986cdc4SMichal Krawczyk }
35776986cdc4SMichal Krawczyk 
35786986cdc4SMichal Krawczyk static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
35796986cdc4SMichal Krawczyk 				    uint16_t queue_id)
35806986cdc4SMichal Krawczyk {
35816986cdc4SMichal Krawczyk 	ena_rx_queue_intr_set(dev, queue_id, true);
35826986cdc4SMichal Krawczyk 
35836986cdc4SMichal Krawczyk 	return 0;
35846986cdc4SMichal Krawczyk }
35856986cdc4SMichal Krawczyk 
35866986cdc4SMichal Krawczyk static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
35876986cdc4SMichal Krawczyk 				     uint16_t queue_id)
35886986cdc4SMichal Krawczyk {
35896986cdc4SMichal Krawczyk 	ena_rx_queue_intr_set(dev, queue_id, false);
35906986cdc4SMichal Krawczyk 
35916986cdc4SMichal Krawczyk 	return 0;
35926986cdc4SMichal Krawczyk }
35936986cdc4SMichal Krawczyk 
3594b9b05d6fSMichal Krawczyk static int ena_configure_aenq(struct ena_adapter *adapter)
3595b9b05d6fSMichal Krawczyk {
3596b9b05d6fSMichal Krawczyk 	uint32_t aenq_groups = adapter->all_aenq_groups;
3597b9b05d6fSMichal Krawczyk 	int rc;
3598b9b05d6fSMichal Krawczyk 
3599b9b05d6fSMichal Krawczyk 	/* All_aenq_groups holds all AENQ functions supported by both the driver
3600b9b05d6fSMichal Krawczyk 	 * and the HW, so at first make sure the LSC request is valid.
3601b9b05d6fSMichal Krawczyk 	 */
3602b9b05d6fSMichal Krawczyk 	if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
3603b9b05d6fSMichal Krawczyk 		if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
3604b9b05d6fSMichal Krawczyk 			PMD_DRV_LOG(ERR,
3605b9b05d6fSMichal Krawczyk 				"LSC requested, but it's not supported by the AENQ\n");
3606b9b05d6fSMichal Krawczyk 			return -EINVAL;
3607b9b05d6fSMichal Krawczyk 		}
3608b9b05d6fSMichal Krawczyk 	} else {
3609b9b05d6fSMichal Krawczyk 		/* If LSC wasn't enabled by the app, let's enable all supported
3610b9b05d6fSMichal Krawczyk 		 * AENQ procedures except the LSC.
3611b9b05d6fSMichal Krawczyk 		 */
3612b9b05d6fSMichal Krawczyk 		aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
3613b9b05d6fSMichal Krawczyk 	}
3614b9b05d6fSMichal Krawczyk 
3615b9b05d6fSMichal Krawczyk 	rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
3616b9b05d6fSMichal Krawczyk 	if (rc != 0) {
3617b9b05d6fSMichal Krawczyk 		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
3618b9b05d6fSMichal Krawczyk 		return rc;
3619b9b05d6fSMichal Krawczyk 	}
3620b9b05d6fSMichal Krawczyk 
3621b9b05d6fSMichal Krawczyk 	adapter->active_aenq_groups = aenq_groups;
3622b9b05d6fSMichal Krawczyk 
3623b9b05d6fSMichal Krawczyk 	return 0;
3624b9b05d6fSMichal Krawczyk }
3625b9b05d6fSMichal Krawczyk 
3626e3595539SStanislaw Kardach int ena_mp_indirect_table_set(struct ena_adapter *adapter)
3627e3595539SStanislaw Kardach {
3628e3595539SStanislaw Kardach 	return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev);
3629e3595539SStanislaw Kardach }
3630e3595539SStanislaw Kardach 
3631e3595539SStanislaw Kardach int ena_mp_indirect_table_get(struct ena_adapter *adapter,
3632e3595539SStanislaw Kardach 			      uint32_t *indirect_table)
3633e3595539SStanislaw Kardach {
3634e3595539SStanislaw Kardach 	return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev,
3635e3595539SStanislaw Kardach 		indirect_table);
3636e3595539SStanislaw Kardach }
3637e3595539SStanislaw Kardach 
3638ca148440SMichal Krawczyk /*********************************************************************
3639850e1bb1SMichal Krawczyk  *  ena_plat_dpdk.h functions implementations
3640850e1bb1SMichal Krawczyk  *********************************************************************/
3641850e1bb1SMichal Krawczyk 
3642850e1bb1SMichal Krawczyk const struct rte_memzone *
3643850e1bb1SMichal Krawczyk ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
3644850e1bb1SMichal Krawczyk 		       int socket_id, unsigned int alignment, void **virt_addr,
3645850e1bb1SMichal Krawczyk 		       dma_addr_t *phys_addr)
3646850e1bb1SMichal Krawczyk {
3647850e1bb1SMichal Krawczyk 	char z_name[RTE_MEMZONE_NAMESIZE];
3648850e1bb1SMichal Krawczyk 	struct ena_adapter *adapter = data->dev_private;
3649850e1bb1SMichal Krawczyk 	const struct rte_memzone *memzone;
3650850e1bb1SMichal Krawczyk 	int rc;
3651850e1bb1SMichal Krawczyk 
3652850e1bb1SMichal Krawczyk 	rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "",
3653850e1bb1SMichal Krawczyk 		data->port_id, adapter->memzone_cnt);
3654850e1bb1SMichal Krawczyk 	if (rc >= RTE_MEMZONE_NAMESIZE) {
3655850e1bb1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
3656850e1bb1SMichal Krawczyk 			"Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n",
3657850e1bb1SMichal Krawczyk 			data->port_id, adapter->memzone_cnt);
3658850e1bb1SMichal Krawczyk 		goto error;
3659850e1bb1SMichal Krawczyk 	}
3660850e1bb1SMichal Krawczyk 	adapter->memzone_cnt++;
3661850e1bb1SMichal Krawczyk 
3662850e1bb1SMichal Krawczyk 	memzone = rte_memzone_reserve_aligned(z_name, size, socket_id,
3663850e1bb1SMichal Krawczyk 		RTE_MEMZONE_IOVA_CONTIG, alignment);
3664850e1bb1SMichal Krawczyk 	if (memzone == NULL) {
3665850e1bb1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n",
3666850e1bb1SMichal Krawczyk 			z_name);
3667850e1bb1SMichal Krawczyk 		goto error;
3668850e1bb1SMichal Krawczyk 	}
3669850e1bb1SMichal Krawczyk 
3670850e1bb1SMichal Krawczyk 	memset(memzone->addr, 0, size);
3671850e1bb1SMichal Krawczyk 	*virt_addr = memzone->addr;
3672850e1bb1SMichal Krawczyk 	*phys_addr = memzone->iova;
3673850e1bb1SMichal Krawczyk 
3674850e1bb1SMichal Krawczyk 	return memzone;
3675850e1bb1SMichal Krawczyk 
3676850e1bb1SMichal Krawczyk error:
3677850e1bb1SMichal Krawczyk 	*virt_addr = NULL;
3678850e1bb1SMichal Krawczyk 	*phys_addr = 0;
3679850e1bb1SMichal Krawczyk 
3680850e1bb1SMichal Krawczyk 	return NULL;
3681850e1bb1SMichal Krawczyk }
3682850e1bb1SMichal Krawczyk 
3683850e1bb1SMichal Krawczyk 
3684850e1bb1SMichal Krawczyk /*********************************************************************
3685ca148440SMichal Krawczyk  *  PMD configuration
3686ca148440SMichal Krawczyk  *********************************************************************/
3687fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3688fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
3689fdf91e0fSJan Blunck {
3690fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
3691fdf91e0fSJan Blunck 		sizeof(struct ena_adapter), eth_ena_dev_init);
3692fdf91e0fSJan Blunck }
3693fdf91e0fSJan Blunck 
3694fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
3695fdf91e0fSJan Blunck {
3696eb0ef49dSMichal Krawczyk 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
3697fdf91e0fSJan Blunck }
3698fdf91e0fSJan Blunck 
3699fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = {
37001173fca2SJan Medala 	.id_table = pci_id_ena_map,
370105e0eee0SRafal Kozik 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
370205e0eee0SRafal Kozik 		     RTE_PCI_DRV_WC_ACTIVATE,
3703fdf91e0fSJan Blunck 	.probe = eth_ena_pci_probe,
3704fdf91e0fSJan Blunck 	.remove = eth_ena_pci_remove,
37051173fca2SJan Medala };
37061173fca2SJan Medala 
3707fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
370801f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
370906e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
37108a7a73f2SMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1> " ENA_DEVARG_MISS_TXC_TO "=<uint>");
3711eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
3712eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
37130a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
37140a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG);
37156f1c9df9SStephen Hemminger #endif
37160a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_TX
37170a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG);
37186f1c9df9SStephen Hemminger #endif
37190a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING);
37203adcba9aSMichal Krawczyk 
37213adcba9aSMichal Krawczyk /******************************************************************************
37223adcba9aSMichal Krawczyk  ******************************** AENQ Handlers *******************************
37233adcba9aSMichal Krawczyk  *****************************************************************************/
3724ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data,
3725ca148440SMichal Krawczyk 				      struct ena_admin_aenq_entry *aenq_e)
3726ca148440SMichal Krawczyk {
3727aab58857SStanislaw Kardach 	struct rte_eth_dev *eth_dev = adapter_data;
3728aab58857SStanislaw Kardach 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3729ca148440SMichal Krawczyk 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
3730ca148440SMichal Krawczyk 	uint32_t status;
3731ca148440SMichal Krawczyk 
3732ca148440SMichal Krawczyk 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3733ca148440SMichal Krawczyk 
3734ca148440SMichal Krawczyk 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
3735ca148440SMichal Krawczyk 	adapter->link_status = status;
3736ca148440SMichal Krawczyk 
3737ca148440SMichal Krawczyk 	ena_link_update(eth_dev, 0);
37385723fbedSFerruh Yigit 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
3739ca148440SMichal Krawczyk }
3740ca148440SMichal Krawczyk 
3741aab58857SStanislaw Kardach static void ena_notification(void *adapter_data,
3742f01f060cSRafal Kozik 			     struct ena_admin_aenq_entry *aenq_e)
3743f01f060cSRafal Kozik {
3744aab58857SStanislaw Kardach 	struct rte_eth_dev *eth_dev = adapter_data;
3745aab58857SStanislaw Kardach 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3746f01f060cSRafal Kozik 	struct ena_admin_ena_hw_hints *hints;
3747f01f060cSRafal Kozik 
3748f01f060cSRafal Kozik 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
3749617898d1SMichal Krawczyk 		PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
3750f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.group,
3751f01f060cSRafal Kozik 			ENA_ADMIN_NOTIFICATION);
3752f01f060cSRafal Kozik 
3753b19f366cSMichal Krawczyk 	switch (aenq_e->aenq_common_desc.syndrome) {
3754f01f060cSRafal Kozik 	case ENA_ADMIN_UPDATE_HINTS:
3755f01f060cSRafal Kozik 		hints = (struct ena_admin_ena_hw_hints *)
3756f01f060cSRafal Kozik 			(&aenq_e->inline_data_w4);
3757f01f060cSRafal Kozik 		ena_update_hints(adapter, hints);
3758f01f060cSRafal Kozik 		break;
3759f01f060cSRafal Kozik 	default:
3760617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
3761b19f366cSMichal Krawczyk 			aenq_e->aenq_common_desc.syndrome);
3762f01f060cSRafal Kozik 	}
3763f01f060cSRafal Kozik }
3764f01f060cSRafal Kozik 
3765d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
3766d9b8b106SMichal Krawczyk 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
3767d9b8b106SMichal Krawczyk {
3768aab58857SStanislaw Kardach 	struct rte_eth_dev *eth_dev = adapter_data;
3769aab58857SStanislaw Kardach 	struct ena_adapter *adapter = eth_dev->data->dev_private;
377094c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
377194c3e376SRafal Kozik 	uint64_t rx_drops;
3772e1e73e32SMichal Krawczyk 	uint64_t tx_drops;
3773d9b8b106SMichal Krawczyk 
3774d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
377594c3e376SRafal Kozik 
377694c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
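	/* The device reports each drop counter as two 32-bit halves; merge
	 * them into 64-bit values.
	 */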
377794c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3778e1e73e32SMichal Krawczyk 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3779e1e73e32SMichal Krawczyk 
3780e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = rx_drops;
3781e1e73e32SMichal Krawczyk 	adapter->dev_stats.tx_drops = tx_drops;
3782d9b8b106SMichal Krawczyk }
3783d9b8b106SMichal Krawczyk 
37843adcba9aSMichal Krawczyk /**
37853adcba9aSMichal Krawczyk  * This handler will be called for any unknown event group or any event with an unimplemented handler
37863adcba9aSMichal Krawczyk  **/
37873adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
37883adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
37893adcba9aSMichal Krawczyk {
3790617898d1SMichal Krawczyk 	PMD_DRV_LOG(ERR,
3791617898d1SMichal Krawczyk 		"Unknown event was received or event with unimplemented handler\n");
37923adcba9aSMichal Krawczyk }
37933adcba9aSMichal Krawczyk 
3794ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
37953adcba9aSMichal Krawczyk 	.handlers = {
3796ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3797f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
3798d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
37993adcba9aSMichal Krawczyk 	},
38003adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
38013adcba9aSMichal Krawczyk };
3802e3595539SStanislaw Kardach 
3803e3595539SStanislaw Kardach /*********************************************************************
3804e3595539SStanislaw Kardach  *  Multi-Process communication request handling (in primary)
3805e3595539SStanislaw Kardach  *********************************************************************/
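/* Runs in the primary process: decode the request sent by a secondary
 * process, execute the corresponding admin queue operation and return the
 * result through rte_mp_reply().
 */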
3806e3595539SStanislaw Kardach static int
3807e3595539SStanislaw Kardach ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
3808e3595539SStanislaw Kardach {
3809e3595539SStanislaw Kardach 	const struct ena_mp_body *req =
3810e3595539SStanislaw Kardach 		(const struct ena_mp_body *)mp_msg->param;
3811e3595539SStanislaw Kardach 	struct ena_adapter *adapter;
3812e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev;
3813e3595539SStanislaw Kardach 	struct ena_mp_body *rsp;
3814e3595539SStanislaw Kardach 	struct rte_mp_msg mp_rsp;
3815e3595539SStanislaw Kardach 	struct rte_eth_dev *dev;
3816e3595539SStanislaw Kardach 	int res = 0;
3817e3595539SStanislaw Kardach 
3818e3595539SStanislaw Kardach 	rsp = (struct ena_mp_body *)&mp_rsp.param;
3819e3595539SStanislaw Kardach 	mp_msg_init(&mp_rsp, req->type, req->port_id);
3820e3595539SStanislaw Kardach 
3821e3595539SStanislaw Kardach 	if (!rte_eth_dev_is_valid_port(req->port_id)) {
3822e3595539SStanislaw Kardach 		rte_errno = ENODEV;
3823e3595539SStanislaw Kardach 		res = -rte_errno;
3824e3595539SStanislaw Kardach 		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
3825e3595539SStanislaw Kardach 			    req->port_id, req->type);
3826e3595539SStanislaw Kardach 		goto end;
3827e3595539SStanislaw Kardach 	}
3828e3595539SStanislaw Kardach 	dev = &rte_eth_devices[req->port_id];
3829e3595539SStanislaw Kardach 	adapter = dev->data->dev_private;
3830e3595539SStanislaw Kardach 	ena_dev = &adapter->ena_dev;
3831e3595539SStanislaw Kardach 
3832e3595539SStanislaw Kardach 	switch (req->type) {
3833e3595539SStanislaw Kardach 	case ENA_MP_DEV_STATS_GET:
3834e3595539SStanislaw Kardach 		res = ena_com_get_dev_basic_stats(ena_dev,
3835e3595539SStanislaw Kardach 						  &adapter->basic_stats);
3836e3595539SStanislaw Kardach 		break;
3837e3595539SStanislaw Kardach 	case ENA_MP_ENI_STATS_GET:
3838e3595539SStanislaw Kardach 		res = ena_com_get_eni_stats(ena_dev,
3839e3595539SStanislaw Kardach 			(struct ena_admin_eni_stats *)&adapter->eni_stats);
3840e3595539SStanislaw Kardach 		break;
3841e3595539SStanislaw Kardach 	case ENA_MP_MTU_SET:
3842e3595539SStanislaw Kardach 		res = ena_com_set_dev_mtu(ena_dev, req->args.mtu);
3843e3595539SStanislaw Kardach 		break;
3844e3595539SStanislaw Kardach 	case ENA_MP_IND_TBL_GET:
3845e3595539SStanislaw Kardach 		res = ena_com_indirect_table_get(ena_dev,
3846e3595539SStanislaw Kardach 						 adapter->indirect_table);
3847e3595539SStanislaw Kardach 		break;
3848e3595539SStanislaw Kardach 	case ENA_MP_IND_TBL_SET:
3849e3595539SStanislaw Kardach 		res = ena_com_indirect_table_set(ena_dev);
3850e3595539SStanislaw Kardach 		break;
3851e3595539SStanislaw Kardach 	default:
3852e3595539SStanislaw Kardach 		PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
3853e3595539SStanislaw Kardach 		res = -EINVAL;
3854e3595539SStanislaw Kardach 		break;
3855e3595539SStanislaw Kardach 	}
3856e3595539SStanislaw Kardach 
3857e3595539SStanislaw Kardach end:
3858e3595539SStanislaw Kardach 	/* Save processing result in the reply */
3859e3595539SStanislaw Kardach 	rsp->result = res;
3860e3595539SStanislaw Kardach 	/* Return just IPC processing status */
3861e3595539SStanislaw Kardach 	return rte_mp_reply(&mp_rsp, peer);
3862e3595539SStanislaw Kardach }
3863