xref: /dpdk/drivers/net/ena/ena_ethdev.c (revision 3dbde902588b5432e3ddfed5407572e5cea0316f)
1702928afSMaciej Bielski /* SPDX-License-Identifier: BSD-3-Clause
238364c26SMichal Krawczyk  * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
31173fca2SJan Medala  * All rights reserved.
41173fca2SJan Medala  */
51173fca2SJan Medala 
66723c0fcSBruce Richardson #include <rte_string_fns.h>
71173fca2SJan Medala #include <rte_errno.h>
8372c1af5SJan Medala #include <rte_version.h>
9b3fc5a1aSKonstantin Ananyev #include <rte_net.h>
108a7a73f2SMichal Krawczyk #include <rte_kvargs.h>
111173fca2SJan Medala 
121173fca2SJan Medala #include "ena_ethdev.h"
131173fca2SJan Medala #include "ena_logs.h"
141173fca2SJan Medala #include "ena_platform.h"
151173fca2SJan Medala #include "ena_com.h"
161173fca2SJan Medala #include "ena_eth_com.h"
171173fca2SJan Medala 
181173fca2SJan Medala #include <ena_common_defs.h>
191173fca2SJan Medala #include <ena_regs_defs.h>
201173fca2SJan Medala #include <ena_admin_defs.h>
211173fca2SJan Medala #include <ena_eth_io_defs.h>
221173fca2SJan Medala 
23419c3e3eSMichal Krawczyk #define DRV_MODULE_VER_MAJOR	2
24*3dbde902SShai Brandes #define DRV_MODULE_VER_MINOR	8
251b48c60dSMichal Krawczyk #define DRV_MODULE_VER_SUBMINOR	0
26372c1af5SJan Medala 
271173fca2SJan Medala #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
281173fca2SJan Medala 
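/* Extract the L4 header length from an mbuf carrying a TCP packet: the upper
 * four bits of the TCP data_off field hold the header length in 32-bit words,
 * which is what the shift by 4 below yields.
 */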
291173fca2SJan Medala #define GET_L4_HDR_LEN(mbuf)					\
30f41b5156SOlivier Matz 	((rte_pktmbuf_mtod_offset(mbuf,	struct rte_tcp_hdr *,	\
311173fca2SJan Medala 		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
321173fca2SJan Medala 
33372c1af5SJan Medala #define ETH_GSTRING_LEN	32
34372c1af5SJan Medala 
35a3c9a11aSAndrew Boyer #define ARRAY_SIZE(x) RTE_DIM(x)
36372c1af5SJan Medala 
3792680dc2SRafal Kozik #define ENA_MIN_RING_DESC	128
3892680dc2SRafal Kozik 
39c339f538SDawid Gorecki /*
40c339f538SDawid Gorecki  * We should try to keep ENA_CLEANUP_BUF_SIZE lower than
41c339f538SDawid Gorecki  * RTE_MEMPOOL_CACHE_MAX_SIZE, so that it can fit in the mempool's local cache.
42c339f538SDawid Gorecki  */
43c339f538SDawid Gorecki #define ENA_CLEANUP_BUF_SIZE	256
44c339f538SDawid Gorecki 
45b418f0d2SMichal Krawczyk #define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)
46b418f0d2SMichal Krawczyk 
47372c1af5SJan Medala struct ena_stats {
48372c1af5SJan Medala 	char name[ETH_GSTRING_LEN];
49372c1af5SJan Medala 	int stat_offset;
50372c1af5SJan Medala };
51372c1af5SJan Medala 
52372c1af5SJan Medala #define ENA_STAT_ENTRY(stat, stat_type) { \
53372c1af5SJan Medala 	.name = #stat, \
54372c1af5SJan Medala 	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
55372c1af5SJan Medala }
56372c1af5SJan Medala 
57372c1af5SJan Medala #define ENA_STAT_RX_ENTRY(stat) \
58372c1af5SJan Medala 	ENA_STAT_ENTRY(stat, rx)
59372c1af5SJan Medala 
60372c1af5SJan Medala #define ENA_STAT_TX_ENTRY(stat) \
61372c1af5SJan Medala 	ENA_STAT_ENTRY(stat, tx)
62372c1af5SJan Medala 
6392401abfSShai Brandes #define ENA_STAT_METRICS_ENTRY(stat) \
6492401abfSShai Brandes 	ENA_STAT_ENTRY(stat, metrics)
6545718adaSMichal Krawczyk 
66372c1af5SJan Medala #define ENA_STAT_GLOBAL_ENTRY(stat) \
67372c1af5SJan Medala 	ENA_STAT_ENTRY(stat, dev)
68372c1af5SJan Medala 
69a73dd098SShai Brandes #define ENA_STAT_ENA_SRD_ENTRY(stat) \
70a73dd098SShai Brandes 	ENA_STAT_ENTRY(stat, srd)
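/* For illustration, ENA_STAT_RX_ENTRY(cnt) expands to
 *   { .name = "cnt", .stat_offset = offsetof(struct ena_stats_rx, cnt) }
 * so each stats table below pairs a stat name with its offset inside the
 * matching per-type stats structure.
 */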
71a73dd098SShai Brandes 
728a7a73f2SMichal Krawczyk /* Device arguments */
738a7a73f2SMichal Krawczyk #define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
74cc0c5d25SMichal Krawczyk /* Timeout in seconds after which a single uncompleted Tx packet should be
75cc0c5d25SMichal Krawczyk  * considered as missing.
76cc0c5d25SMichal Krawczyk  */
77cc0c5d25SMichal Krawczyk #define ENA_DEVARG_MISS_TXC_TO "miss_txc_to"
789944919eSMichal Krawczyk /*
799944919eSMichal Krawczyk  * Controls whether LLQ should be used (if available). Enabled by default.
809944919eSMichal Krawczyk  * NOTE: Disabling the LLQ is strongly discouraged, as it may lead to
819944919eSMichal Krawczyk  * severe performance degradation on 6th generation AWS instances.
829944919eSMichal Krawczyk  */
839944919eSMichal Krawczyk #define ENA_DEVARG_ENABLE_LLQ "enable_llq"
848a7a73f2SMichal Krawczyk 
853adcba9aSMichal Krawczyk /*
863adcba9aSMichal Krawczyk  * Each rte_memzone should have a unique name.
873adcba9aSMichal Krawczyk  * To satisfy this, count the number of allocations and append the counter to the name.
883adcba9aSMichal Krawczyk  */
897c0a233eSAmit Bernstein rte_atomic64_t ena_alloc_cnt;
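/* A minimal sketch of the intended use (the name format here is hypothetical):
 *   snprintf(name, RTE_MEMZONE_NAMESIZE, "ena_mz_%" PRIi64,
 *            rte_atomic64_add_return(&ena_alloc_cnt, 1));
 */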
903adcba9aSMichal Krawczyk 
91372c1af5SJan Medala static const struct ena_stats ena_stats_global_strings[] = {
92372c1af5SJan Medala 	ENA_STAT_GLOBAL_ENTRY(wd_expired),
937830e905SSolganik Alexander 	ENA_STAT_GLOBAL_ENTRY(dev_start),
947830e905SSolganik Alexander 	ENA_STAT_GLOBAL_ENTRY(dev_stop),
95e1e73e32SMichal Krawczyk 	ENA_STAT_GLOBAL_ENTRY(tx_drops),
9677d4ed30SShai Brandes 	ENA_STAT_GLOBAL_ENTRY(rx_overruns),
97372c1af5SJan Medala };
98372c1af5SJan Medala 
9992401abfSShai Brandes /*
10092401abfSShai Brandes  * The legacy metrics (also known as eni stats) consist of 5 stats, while the reworked
10192401abfSShai Brandes  * metrics (also known as customer metrics) support one additional stat.
10292401abfSShai Brandes  */
10392401abfSShai Brandes static struct ena_stats ena_stats_metrics_strings[] = {
10492401abfSShai Brandes 	ENA_STAT_METRICS_ENTRY(bw_in_allowance_exceeded),
10592401abfSShai Brandes 	ENA_STAT_METRICS_ENTRY(bw_out_allowance_exceeded),
10692401abfSShai Brandes 	ENA_STAT_METRICS_ENTRY(pps_allowance_exceeded),
10792401abfSShai Brandes 	ENA_STAT_METRICS_ENTRY(conntrack_allowance_exceeded),
10892401abfSShai Brandes 	ENA_STAT_METRICS_ENTRY(linklocal_allowance_exceeded),
10992401abfSShai Brandes 	ENA_STAT_METRICS_ENTRY(conntrack_allowance_available),
11045718adaSMichal Krawczyk };
11145718adaSMichal Krawczyk 
112a73dd098SShai Brandes static const struct ena_stats ena_stats_srd_strings[] = {
113a73dd098SShai Brandes 	ENA_STAT_ENA_SRD_ENTRY(ena_srd_mode),
114a73dd098SShai Brandes 	ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
115a73dd098SShai Brandes 	ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
116a73dd098SShai Brandes 	ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
117a73dd098SShai Brandes 	ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization),
118a73dd098SShai Brandes };
119a73dd098SShai Brandes 
120372c1af5SJan Medala static const struct ena_stats ena_stats_tx_strings[] = {
121372c1af5SJan Medala 	ENA_STAT_TX_ENTRY(cnt),
122372c1af5SJan Medala 	ENA_STAT_TX_ENTRY(bytes),
1237830e905SSolganik Alexander 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
124372c1af5SJan Medala 	ENA_STAT_TX_ENTRY(tx_poll),
125372c1af5SJan Medala 	ENA_STAT_TX_ENTRY(doorbells),
126372c1af5SJan Medala 	ENA_STAT_TX_ENTRY(bad_req_id),
1277830e905SSolganik Alexander 	ENA_STAT_TX_ENTRY(available_desc),
128f93e20e5SMichal Krawczyk 	ENA_STAT_TX_ENTRY(missed_tx),
129372c1af5SJan Medala };
130372c1af5SJan Medala 
131372c1af5SJan Medala static const struct ena_stats ena_stats_rx_strings[] = {
132372c1af5SJan Medala 	ENA_STAT_RX_ENTRY(cnt),
133372c1af5SJan Medala 	ENA_STAT_RX_ENTRY(bytes),
1347830e905SSolganik Alexander 	ENA_STAT_RX_ENTRY(refill_partial),
13584daba99SMichal Krawczyk 	ENA_STAT_RX_ENTRY(l3_csum_bad),
13684daba99SMichal Krawczyk 	ENA_STAT_RX_ENTRY(l4_csum_bad),
13784daba99SMichal Krawczyk 	ENA_STAT_RX_ENTRY(l4_csum_good),
1387830e905SSolganik Alexander 	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
139372c1af5SJan Medala 	ENA_STAT_RX_ENTRY(bad_desc_num),
1407830e905SSolganik Alexander 	ENA_STAT_RX_ENTRY(bad_req_id),
141372c1af5SJan Medala };
142372c1af5SJan Medala 
143372c1af5SJan Medala #define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
14492401abfSShai Brandes #define ENA_STATS_ARRAY_METRICS	ARRAY_SIZE(ena_stats_metrics_strings)
14592401abfSShai Brandes #define ENA_STATS_ARRAY_METRICS_LEGACY	(ENA_STATS_ARRAY_METRICS - 1)
146a73dd098SShai Brandes #define ENA_STATS_ARRAY_ENA_SRD	ARRAY_SIZE(ena_stats_srd_strings)
147372c1af5SJan Medala #define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
148372c1af5SJan Medala #define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
1491173fca2SJan Medala 
150295968d1SFerruh Yigit #define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
151295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
152295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
153295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_TCP_TSO)
154daa02b5cSOlivier Matz #define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
155daa02b5cSOlivier Matz 		       RTE_MBUF_F_TX_IP_CKSUM |\
156daa02b5cSOlivier Matz 		       RTE_MBUF_F_TX_TCP_SEG)
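/* QUEUE_OFFLOADS lists the per-queue Tx offloads handled by the PMD and
 * MBUF_OFFLOADS the matching per-mbuf request flags; both are checked
 * together in ena_tx_mbuf_prepare() before any offload context is filled.
 */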
15756b8b9b7SRafal Kozik 
1581173fca2SJan Medala /** Vendor ID used by Amazon devices */
1591173fca2SJan Medala #define PCI_VENDOR_ID_AMAZON 0x1D0F
1601173fca2SJan Medala /** Amazon devices */
1611173fca2SJan Medala #define PCI_DEVICE_ID_ENA_VF		0xEC20
162f7138b91SMichal Krawczyk #define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21
1631173fca2SJan Medala 
164daa02b5cSOlivier Matz #define	ENA_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_L4_MASK |         \
165daa02b5cSOlivier Matz 	RTE_MBUF_F_TX_IPV6 |            \
166daa02b5cSOlivier Matz 	RTE_MBUF_F_TX_IPV4 |            \
167daa02b5cSOlivier Matz 	RTE_MBUF_F_TX_IP_CKSUM |        \
168daa02b5cSOlivier Matz 	RTE_MBUF_F_TX_TCP_SEG)
169b3fc5a1aSKonstantin Ananyev 
170b3fc5a1aSKonstantin Ananyev #define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
171daa02b5cSOlivier Matz 	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
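/* Complement of the supported set: any flag within RTE_MBUF_F_TX_OFFLOAD_MASK
 * that is not part of ENA_TX_OFFLOAD_MASK is unsupported by this PMD.
 */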
172b3fc5a1aSKonstantin Ananyev 
173e8c838fdSMichal Krawczyk /** HW-specific offload capabilities. */
174e8c838fdSMichal Krawczyk /* IPv4 checksum offload. */
175e8c838fdSMichal Krawczyk #define ENA_L3_IPV4_CSUM		0x0001
176e8c838fdSMichal Krawczyk /* TCP/UDP checksum offload for IPv4 packets. */
177e8c838fdSMichal Krawczyk #define ENA_L4_IPV4_CSUM		0x0002
178e8c838fdSMichal Krawczyk /* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
179e8c838fdSMichal Krawczyk #define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
180e8c838fdSMichal Krawczyk /* TCP/UDP checksum offload for IPv6 packets. */
181e8c838fdSMichal Krawczyk #define ENA_L4_IPV6_CSUM		0x0008
182e8c838fdSMichal Krawczyk /* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
183e8c838fdSMichal Krawczyk #define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
184e8c838fdSMichal Krawczyk /* TSO support for IPv4 packets. */
185e8c838fdSMichal Krawczyk #define ENA_IPV4_TSO			0x0020
186e8c838fdSMichal Krawczyk 
187e8c838fdSMichal Krawczyk /* Device supports setting RSS hash. */
188e8c838fdSMichal Krawczyk #define ENA_RX_RSS_HASH			0x0040
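/* These flags form a capability bitmask, e.g. a device supporting IPv4
 * checksum offload and TSO would report (ENA_L3_IPV4_CSUM | ENA_IPV4_TSO).
 */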
189e8c838fdSMichal Krawczyk 
19028a1fd4fSFerruh Yigit static const struct rte_pci_id pci_id_ena_map[] = {
191cb990571SDavid Marchand 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
192f7138b91SMichal Krawczyk 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
1931173fca2SJan Medala 	{ .device_id = 0 },
1941173fca2SJan Medala };
1951173fca2SJan Medala 
196ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers;
1973adcba9aSMichal Krawczyk 
198b9b05d6fSMichal Krawczyk static int ena_device_init(struct ena_adapter *adapter,
199aab58857SStanislaw Kardach 			   struct rte_pci_device *pdev,
200b9b05d6fSMichal Krawczyk 			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
2011173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev);
20236278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
20336278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info,
20436278b82SMichal Krawczyk 	struct rte_mbuf *mbuf,
20536278b82SMichal Krawczyk 	void **push_header,
20636278b82SMichal Krawczyk 	uint16_t *header_len);
20736278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
208a52b317eSDawid Gorecki static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt);
2091173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2101173fca2SJan Medala 				  uint16_t nb_pkts);
211b3fc5a1aSKonstantin Ananyev static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
212b3fc5a1aSKonstantin Ananyev 		uint16_t nb_pkts);
2131173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
2141173fca2SJan Medala 			      uint16_t nb_desc, unsigned int socket_id,
2151173fca2SJan Medala 			      const struct rte_eth_txconf *tx_conf);
2161173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
2171173fca2SJan Medala 			      uint16_t nb_desc, unsigned int socket_id,
2181173fca2SJan Medala 			      const struct rte_eth_rxconf *rx_conf,
2191173fca2SJan Medala 			      struct rte_mempool *mp);
2201be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
2211be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
2221be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
2231be097dcSMichal Krawczyk 				    uint32_t descs,
2241be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
2251be097dcSMichal Krawczyk 				    uint8_t offset);
2261173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue,
2271173fca2SJan Medala 				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
22883fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
22983fd97b2SMichal Krawczyk 				  struct rte_mbuf *mbuf, uint16_t id);
2301173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
23133dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
23233dde075SMichal Krawczyk 			   bool disable_meta_caching);
2331173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
2341173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev);
23562024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev);
236b142387bSThomas Monjalon static int ena_close(struct rte_eth_dev *dev);
2372081d5e2SMichal Krawczyk static int ena_dev_reset(struct rte_eth_dev *dev);
238d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
2391173fca2SJan Medala static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
2401173fca2SJan Medala static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
2417483341aSXueming Li static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
2427483341aSXueming Li static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
2431173fca2SJan Medala static void ena_rx_queue_release_bufs(struct ena_ring *ring);
2441173fca2SJan Medala static void ena_tx_queue_release_bufs(struct ena_ring *ring);
2451173fca2SJan Medala static int ena_link_update(struct rte_eth_dev *dev,
246dd2c630aSFerruh Yigit 			   int wait_to_complete);
2476986cdc4SMichal Krawczyk static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
24826e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring);
24926e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev,
25026e5543dSRafal Kozik 			      enum ena_ring_type ring_type);
2516986cdc4SMichal Krawczyk static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
25226e5543dSRafal Kozik static int ena_queue_start_all(struct rte_eth_dev *dev,
2531173fca2SJan Medala 			       enum ena_ring_type ring_type);
2541173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev);
2553a822d79SMichal Krawczyk static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
2563a822d79SMichal Krawczyk static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
2573a822d79SMichal Krawczyk static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
2583a822d79SMichal Krawczyk static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
259bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
2601173fca2SJan Medala 			 struct rte_eth_dev_info *dev_info);
26115773e06SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg);
262d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
263e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev);
264e457bc70SRafal Kozik static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
2657830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
2667830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
2677830e905SSolganik Alexander 				unsigned int n);
2683cec73faSMichal Krawczyk static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
2693cec73faSMichal Krawczyk 				      const uint64_t *ids,
2703cec73faSMichal Krawczyk 				      struct rte_eth_xstat_name *xstats_names,
2713cec73faSMichal Krawczyk 				      unsigned int size);
2727830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
2737830e905SSolganik Alexander 			  struct rte_eth_xstat *stats,
2747830e905SSolganik Alexander 			  unsigned int n);
2757830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
2767830e905SSolganik Alexander 				const uint64_t *ids,
2777830e905SSolganik Alexander 				uint64_t *values,
2787830e905SSolganik Alexander 				unsigned int n);
2798a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key,
2808a7a73f2SMichal Krawczyk 				   const char *value,
2818a7a73f2SMichal Krawczyk 				   void *opaque);
2828a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter,
2838a7a73f2SMichal Krawczyk 			     struct rte_devargs *devargs);
28492401abfSShai Brandes static void ena_copy_customer_metrics(struct ena_adapter *adapter,
28592401abfSShai Brandes 					uint64_t *buf,
28692401abfSShai Brandes 					size_t buf_size);
287a73dd098SShai Brandes static void ena_copy_ena_srd_info(struct ena_adapter *adapter,
288a73dd098SShai Brandes 				  struct ena_stats_srd *srd_info);
2896986cdc4SMichal Krawczyk static int ena_setup_rx_intr(struct rte_eth_dev *dev);
2906986cdc4SMichal Krawczyk static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
2916986cdc4SMichal Krawczyk 				    uint16_t queue_id);
2926986cdc4SMichal Krawczyk static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
2936986cdc4SMichal Krawczyk 				     uint16_t queue_id);
294b9b05d6fSMichal Krawczyk static int ena_configure_aenq(struct ena_adapter *adapter);
295e3595539SStanislaw Kardach static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
296e3595539SStanislaw Kardach 				 const void *peer);
2971173fca2SJan Medala 
298103ab18cSFerruh Yigit static const struct eth_dev_ops ena_dev_ops = {
2991173fca2SJan Medala 	.dev_configure          = ena_dev_configure,
3001173fca2SJan Medala 	.dev_infos_get          = ena_infos_get,
3011173fca2SJan Medala 	.rx_queue_setup         = ena_rx_queue_setup,
3021173fca2SJan Medala 	.tx_queue_setup         = ena_tx_queue_setup,
3031173fca2SJan Medala 	.dev_start              = ena_start,
304eb0ef49dSMichal Krawczyk 	.dev_stop               = ena_stop,
3051173fca2SJan Medala 	.link_update            = ena_link_update,
3061173fca2SJan Medala 	.stats_get              = ena_stats_get,
3077830e905SSolganik Alexander 	.xstats_get_names       = ena_xstats_get_names,
3083cec73faSMichal Krawczyk 	.xstats_get_names_by_id = ena_xstats_get_names_by_id,
3097830e905SSolganik Alexander 	.xstats_get             = ena_xstats_get,
3107830e905SSolganik Alexander 	.xstats_get_by_id       = ena_xstats_get_by_id,
3111173fca2SJan Medala 	.mtu_set                = ena_mtu_set,
3121173fca2SJan Medala 	.rx_queue_release       = ena_rx_queue_release,
3131173fca2SJan Medala 	.tx_queue_release       = ena_tx_queue_release,
3141173fca2SJan Medala 	.dev_close              = ena_close,
3152081d5e2SMichal Krawczyk 	.dev_reset              = ena_dev_reset,
3161173fca2SJan Medala 	.reta_update            = ena_rss_reta_update,
3171173fca2SJan Medala 	.reta_query             = ena_rss_reta_query,
3186986cdc4SMichal Krawczyk 	.rx_queue_intr_enable   = ena_rx_queue_intr_enable,
3196986cdc4SMichal Krawczyk 	.rx_queue_intr_disable  = ena_rx_queue_intr_disable,
32034d5e97eSMichal Krawczyk 	.rss_hash_update        = ena_rss_hash_update,
32134d5e97eSMichal Krawczyk 	.rss_hash_conf_get      = ena_rss_hash_conf_get,
322a52b317eSDawid Gorecki 	.tx_done_cleanup        = ena_tx_cleanup,
3231173fca2SJan Medala };
3241173fca2SJan Medala 
325e3595539SStanislaw Kardach /*********************************************************************
326e3595539SStanislaw Kardach  *  Multi-Process communication bits
327e3595539SStanislaw Kardach  *********************************************************************/
328e3595539SStanislaw Kardach /* rte_mp IPC message name */
329e3595539SStanislaw Kardach #define ENA_MP_NAME	"net_ena_mp"
330e3595539SStanislaw Kardach /* Request timeout in seconds */
331e3595539SStanislaw Kardach #define ENA_MP_REQ_TMO	5
332e3595539SStanislaw Kardach 
333e3595539SStanislaw Kardach /** Proxy request type */
334e3595539SStanislaw Kardach enum ena_mp_req {
335e3595539SStanislaw Kardach 	ENA_MP_DEV_STATS_GET,
336e3595539SStanislaw Kardach 	ENA_MP_ENI_STATS_GET,
337e3595539SStanislaw Kardach 	ENA_MP_MTU_SET,
338e3595539SStanislaw Kardach 	ENA_MP_IND_TBL_GET,
33992401abfSShai Brandes 	ENA_MP_IND_TBL_SET,
34092401abfSShai Brandes 	ENA_MP_CUSTOMER_METRICS_GET,
341a73dd098SShai Brandes 	ENA_MP_SRD_STATS_GET,
342e3595539SStanislaw Kardach };
343e3595539SStanislaw Kardach 
344e3595539SStanislaw Kardach /** Proxy message body. Shared between requests and responses. */
345e3595539SStanislaw Kardach struct ena_mp_body {
346e3595539SStanislaw Kardach 	/* Message type */
347e3595539SStanislaw Kardach 	enum ena_mp_req type;
348e3595539SStanislaw Kardach 	int port_id;
349e3595539SStanislaw Kardach 	/* Processing result. Set in replies. 0 if message succeeded, negative
350e3595539SStanislaw Kardach 	 * error code otherwise.
351e3595539SStanislaw Kardach 	 */
352e3595539SStanislaw Kardach 	int result;
353e3595539SStanislaw Kardach 	union {
354e3595539SStanislaw Kardach 		int mtu; /* For ENA_MP_MTU_SET */
355e3595539SStanislaw Kardach 	} args;
356e3595539SStanislaw Kardach };
357e3595539SStanislaw Kardach 
358e3595539SStanislaw Kardach /**
359e3595539SStanislaw Kardach  * Initialize IPC message.
360e3595539SStanislaw Kardach  *
361e3595539SStanislaw Kardach  * @param[out] msg
362e3595539SStanislaw Kardach  *   Pointer to the message to initialize.
363e3595539SStanislaw Kardach  * @param[in] type
364e3595539SStanislaw Kardach  *   Message type.
365e3595539SStanislaw Kardach  * @param[in] port_id
366e3595539SStanislaw Kardach  *   Port ID of target device.
367e3595539SStanislaw Kardach  *
368e3595539SStanislaw Kardach  */
369e3595539SStanislaw Kardach static void
370e3595539SStanislaw Kardach mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id)
371e3595539SStanislaw Kardach {
372e3595539SStanislaw Kardach 	struct ena_mp_body *body = (struct ena_mp_body *)&msg->param;
373e3595539SStanislaw Kardach 
374e3595539SStanislaw Kardach 	memset(msg, 0, sizeof(*msg));
375e3595539SStanislaw Kardach 	strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name));
376e3595539SStanislaw Kardach 	msg->len_param = sizeof(*body);
377e3595539SStanislaw Kardach 	body->type = type;
378e3595539SStanislaw Kardach 	body->port_id = port_id;
379e3595539SStanislaw Kardach }
380e3595539SStanislaw Kardach 
381e3595539SStanislaw Kardach /*********************************************************************
382e3595539SStanislaw Kardach  *  Multi-Process communication PMD API
383e3595539SStanislaw Kardach  *********************************************************************/
384e3595539SStanislaw Kardach /**
385e3595539SStanislaw Kardach  * Define proxy request descriptor
386e3595539SStanislaw Kardach  *
387e3595539SStanislaw Kardach  * Used to define all structures and functions required for proxying a given
388e3595539SStanislaw Kardach  * function to the primary process, including the code to prepare the
389e3595539SStanislaw Kardach  * request and process the response.
390e3595539SStanislaw Kardach  *
391e3595539SStanislaw Kardach  * @param[in] f
392e3595539SStanislaw Kardach  *   Name of the function to proxy
393e3595539SStanislaw Kardach  * @param[in] t
394e3595539SStanislaw Kardach  *   Message type to use
395e3595539SStanislaw Kardach  * @param[in] prep
396e3595539SStanislaw Kardach  *   Body of a function to prepare the request in the form of a statement
397e3595539SStanislaw Kardach  *   expression. It is passed all the original function arguments along with two
398e3595539SStanislaw Kardach  *   extra ones:
399e3595539SStanislaw Kardach  *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
400e3595539SStanislaw Kardach  *   - struct ena_mp_body *req - body of a request to prepare.
401e3595539SStanislaw Kardach  * @param[in] proc
402e3595539SStanislaw Kardach  *   Body of a function to process the response in the form of a statement
403e3595539SStanislaw Kardach  *   expression. It is passed all the original function arguments along with two
404e3595539SStanislaw Kardach  *   extra ones:
405e3595539SStanislaw Kardach  *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
406e3595539SStanislaw Kardach  *   - struct ena_mp_body *rsp - body of a response to process.
407e3595539SStanislaw Kardach  * @param ...
408e3595539SStanislaw Kardach  *   Proxied function's arguments
409e3595539SStanislaw Kardach  *
410e3595539SStanislaw Kardach  * @note Inside prep and proc any parameters which aren't used should be marked
411e3595539SStanislaw Kardach  *       as such (with ENA_TOUCH or __rte_unused).
412e3595539SStanislaw Kardach  */
413e3595539SStanislaw Kardach #define ENA_PROXY_DESC(f, t, prep, proc, ...)			\
414e3595539SStanislaw Kardach 	static const enum ena_mp_req mp_type_ ## f =  t;	\
415e3595539SStanislaw Kardach 	static const char *mp_name_ ## f = #t;			\
416e3595539SStanislaw Kardach 	static void mp_prep_ ## f(struct ena_adapter *adapter,	\
417e3595539SStanislaw Kardach 				  struct ena_mp_body *req,	\
418e3595539SStanislaw Kardach 				  __VA_ARGS__)			\
419e3595539SStanislaw Kardach 	{							\
420e3595539SStanislaw Kardach 		prep;						\
421e3595539SStanislaw Kardach 	}							\
422e3595539SStanislaw Kardach 	static void mp_proc_ ## f(struct ena_adapter *adapter,	\
423e3595539SStanislaw Kardach 				  struct ena_mp_body *rsp,	\
424e3595539SStanislaw Kardach 				  __VA_ARGS__)			\
425e3595539SStanislaw Kardach 	{							\
426e3595539SStanislaw Kardach 		proc;						\
427e3595539SStanislaw Kardach 	}
428e3595539SStanislaw Kardach 
429e3595539SStanislaw Kardach /**
430e3595539SStanislaw Kardach  * Proxy wrapper for calling primary functions in a secondary process.
431e3595539SStanislaw Kardach  *
432e3595539SStanislaw Kardach  * Depending on whether called in primary or secondary process, calls the
433e3595539SStanislaw Kardach  * @p func directly or proxies the call to the primary process via rte_mp IPC.
434e3595539SStanislaw Kardach  * This macro requires a proxy request descriptor to be defined for @p func
435e3595539SStanislaw Kardach  * using ENA_PROXY_DESC() macro.
436e3595539SStanislaw Kardach  *
437e3595539SStanislaw Kardach  * @param[in/out] a
438e3595539SStanislaw Kardach  *   Device PMD data. Used for sending the message and sharing message results
439e3595539SStanislaw Kardach  *   between primary and secondary.
440e3595539SStanislaw Kardach  * @param[in] f
441e3595539SStanislaw Kardach  *   Function to proxy.
442e3595539SStanislaw Kardach  * @param ...
443e3595539SStanislaw Kardach  *   Arguments of @p func.
444e3595539SStanislaw Kardach  *
445e3595539SStanislaw Kardach  * @return
446e3595539SStanislaw Kardach  *   - 0: Processing succeeded and response handler was called.
447e3595539SStanislaw Kardach  *   - -EPERM: IPC is unavailable on this platform. This means only the
448e3595539SStanislaw Kardach  *             primary process may call the proxied function.
449e3595539SStanislaw Kardach  *   - -EIO:   IPC returned error on request send. Inspect rte_errno for the
450e3595539SStanislaw Kardach  *             detailed error code.
451e3595539SStanislaw Kardach  *   - Negative error code from the proxied function.
452e3595539SStanislaw Kardach  *
453e3595539SStanislaw Kardach  * @note This mechanism is geared towards control-path tasks. Avoid calling it
454e3595539SStanislaw Kardach  *       in fast-path unless unbounded delays are allowed. This is due to the IPC
455e3595539SStanislaw Kardach  *       mechanism itself (socket based).
456e3595539SStanislaw Kardach  * @note Due to IPC parameter size limitations the proxy logic shares call
457e3595539SStanislaw Kardach  *       results through the struct ena_adapter shared memory. This makes the
458e3595539SStanislaw Kardach  *       proxy mechanism strictly single-threaded. Therefore be sure to make all
459e3595539SStanislaw Kardach  *       calls to the same proxied function under the same lock.
460e3595539SStanislaw Kardach  */
461e3595539SStanislaw Kardach #define ENA_PROXY(a, f, ...)						\
462e3595539SStanislaw Kardach ({									\
463e3595539SStanislaw Kardach 	struct ena_adapter *_a = (a);					\
464e3595539SStanislaw Kardach 	struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO };		\
465e3595539SStanislaw Kardach 	struct ena_mp_body *req, *rsp;					\
466e3595539SStanislaw Kardach 	struct rte_mp_reply mp_rep;					\
467e3595539SStanislaw Kardach 	struct rte_mp_msg mp_req;					\
468e3595539SStanislaw Kardach 	int ret;							\
469e3595539SStanislaw Kardach 									\
470e3595539SStanislaw Kardach 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {		\
471e3595539SStanislaw Kardach 		ret = f(__VA_ARGS__);					\
472e3595539SStanislaw Kardach 	} else {							\
473e3595539SStanislaw Kardach 		/* Prepare and send request */				\
474e3595539SStanislaw Kardach 		req = (struct ena_mp_body *)&mp_req.param;		\
475e3595539SStanislaw Kardach 		mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \
476e3595539SStanislaw Kardach 		mp_prep_ ## f(_a, req, ## __VA_ARGS__);			\
477e3595539SStanislaw Kardach 									\
478e3595539SStanislaw Kardach 		ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);	\
479e3595539SStanislaw Kardach 		if (likely(!ret)) {					\
480e3595539SStanislaw Kardach 			RTE_ASSERT(mp_rep.nb_received == 1);		\
481e3595539SStanislaw Kardach 			rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \
482e3595539SStanislaw Kardach 			ret = rsp->result;				\
483e3595539SStanislaw Kardach 			if (ret == 0) {					\
484e3595539SStanislaw Kardach 				mp_proc_##f(_a, rsp, ## __VA_ARGS__);	\
485e3595539SStanislaw Kardach 			} else {					\
486e3595539SStanislaw Kardach 				PMD_DRV_LOG(ERR,			\
487e3595539SStanislaw Kardach 					    "%s returned error: %d\n",	\
488e3595539SStanislaw Kardach 					    mp_name_ ## f, rsp->result);\
489e3595539SStanislaw Kardach 			}						\
490e3595539SStanislaw Kardach 			free(mp_rep.msgs);				\
491e3595539SStanislaw Kardach 		} else if (rte_errno == ENOTSUP) {			\
492e3595539SStanislaw Kardach 			PMD_DRV_LOG(ERR,				\
493e3595539SStanislaw Kardach 				    "No IPC, can't proxy to primary\n");\
494e3595539SStanislaw Kardach 			ret = -rte_errno;				\
495e3595539SStanislaw Kardach 		} else {						\
496e3595539SStanislaw Kardach 			PMD_DRV_LOG(ERR, "Request %s failed: %s\n",	\
497e3595539SStanislaw Kardach 				    mp_name_ ## f,			\
498e3595539SStanislaw Kardach 				    rte_strerror(rte_errno));		\
499e3595539SStanislaw Kardach 			ret = -EIO;					\
500e3595539SStanislaw Kardach 		}							\
501e3595539SStanislaw Kardach 	}								\
502e3595539SStanislaw Kardach 	ret;								\
503e3595539SStanislaw Kardach })
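
/*
 * Illustrative call (a sketch only), e.g. proxying an MTU update to the
 * primary process via the descriptor defined below:
 *   rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, &adapter->ena_dev, mtu);
 */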
504e3595539SStanislaw Kardach 
505e3595539SStanislaw Kardach /*********************************************************************
506e3595539SStanislaw Kardach  *  Multi-Process communication request descriptors
507e3595539SStanislaw Kardach  *********************************************************************/
508e3595539SStanislaw Kardach 
509e3595539SStanislaw Kardach ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET,
510e3595539SStanislaw Kardach ({
511e3595539SStanislaw Kardach 	ENA_TOUCH(adapter);
512e3595539SStanislaw Kardach 	ENA_TOUCH(req);
513e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
514e3595539SStanislaw Kardach 	ENA_TOUCH(stats);
515e3595539SStanislaw Kardach }),
516e3595539SStanislaw Kardach ({
517e3595539SStanislaw Kardach 	ENA_TOUCH(rsp);
518e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
519e3595539SStanislaw Kardach 	if (stats != &adapter->basic_stats)
520e3595539SStanislaw Kardach 		rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats));
521e3595539SStanislaw Kardach }),
522e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats);
523e3595539SStanislaw Kardach 
524e3595539SStanislaw Kardach ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET,
525e3595539SStanislaw Kardach ({
526e3595539SStanislaw Kardach 	ENA_TOUCH(adapter);
527e3595539SStanislaw Kardach 	ENA_TOUCH(req);
528e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
529e3595539SStanislaw Kardach 	ENA_TOUCH(stats);
530e3595539SStanislaw Kardach }),
531e3595539SStanislaw Kardach ({
532e3595539SStanislaw Kardach 	ENA_TOUCH(rsp);
533e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
53492401abfSShai Brandes 	if (stats != (struct ena_admin_eni_stats *)&adapter->metrics_stats)
53592401abfSShai Brandes 		rte_memcpy(stats, &adapter->metrics_stats, sizeof(*stats));
536e3595539SStanislaw Kardach }),
537e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats);
538e3595539SStanislaw Kardach 
539e3595539SStanislaw Kardach ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
540e3595539SStanislaw Kardach ({
541e3595539SStanislaw Kardach 	ENA_TOUCH(adapter);
542e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
543e3595539SStanislaw Kardach 	req->args.mtu = mtu;
544e3595539SStanislaw Kardach }),
545e3595539SStanislaw Kardach ({
546e3595539SStanislaw Kardach 	ENA_TOUCH(adapter);
547e3595539SStanislaw Kardach 	ENA_TOUCH(rsp);
548e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
549e3595539SStanislaw Kardach 	ENA_TOUCH(mtu);
550e3595539SStanislaw Kardach }),
551e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev, int mtu);
552e3595539SStanislaw Kardach 
553e3595539SStanislaw Kardach ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET,
554e3595539SStanislaw Kardach ({
555e3595539SStanislaw Kardach 	ENA_TOUCH(adapter);
556e3595539SStanislaw Kardach 	ENA_TOUCH(req);
557e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
558e3595539SStanislaw Kardach }),
559e3595539SStanislaw Kardach ({
560e3595539SStanislaw Kardach 	ENA_TOUCH(adapter);
561e3595539SStanislaw Kardach 	ENA_TOUCH(rsp);
562e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
563e3595539SStanislaw Kardach }),
564e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev);
565e3595539SStanislaw Kardach 
566e3595539SStanislaw Kardach ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET,
567e3595539SStanislaw Kardach ({
568e3595539SStanislaw Kardach 	ENA_TOUCH(adapter);
569e3595539SStanislaw Kardach 	ENA_TOUCH(req);
570e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
571e3595539SStanislaw Kardach 	ENA_TOUCH(ind_tbl);
572e3595539SStanislaw Kardach }),
573e3595539SStanislaw Kardach ({
574e3595539SStanislaw Kardach 	ENA_TOUCH(rsp);
575e3595539SStanislaw Kardach 	ENA_TOUCH(ena_dev);
576e3595539SStanislaw Kardach 	if (ind_tbl != adapter->indirect_table)
577e3595539SStanislaw Kardach 		rte_memcpy(ind_tbl, adapter->indirect_table,
578e3595539SStanislaw Kardach 			   sizeof(adapter->indirect_table));
579e3595539SStanislaw Kardach }),
580e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev, u32 *ind_tbl);
581e3595539SStanislaw Kardach 
58292401abfSShai Brandes ENA_PROXY_DESC(ena_com_get_customer_metrics, ENA_MP_CUSTOMER_METRICS_GET,
58392401abfSShai Brandes ({
58492401abfSShai Brandes 	ENA_TOUCH(adapter);
58592401abfSShai Brandes 	ENA_TOUCH(req);
58692401abfSShai Brandes 	ENA_TOUCH(ena_dev);
58792401abfSShai Brandes 	ENA_TOUCH(buf);
58892401abfSShai Brandes 	ENA_TOUCH(buf_size);
58992401abfSShai Brandes }),
59092401abfSShai Brandes ({
59192401abfSShai Brandes 	ENA_TOUCH(rsp);
59292401abfSShai Brandes 	ENA_TOUCH(ena_dev);
59392401abfSShai Brandes 	ENA_TOUCH(buf_size);
59492401abfSShai Brandes 	if (buf != (char *)&adapter->metrics_stats)
59592401abfSShai Brandes 		rte_memcpy(buf, &adapter->metrics_stats, adapter->metrics_num * sizeof(uint64_t));
59692401abfSShai Brandes }),
59792401abfSShai Brandes 	struct ena_com_dev *ena_dev, char *buf, size_t buf_size);
59892401abfSShai Brandes 
599a73dd098SShai Brandes ENA_PROXY_DESC(ena_com_get_ena_srd_info, ENA_MP_SRD_STATS_GET,
600a73dd098SShai Brandes ({
601a73dd098SShai Brandes 	ENA_TOUCH(adapter);
602a73dd098SShai Brandes 	ENA_TOUCH(req);
603a73dd098SShai Brandes 	ENA_TOUCH(ena_dev);
604a73dd098SShai Brandes 	ENA_TOUCH(info);
605a73dd098SShai Brandes }),
606a73dd098SShai Brandes ({
607a73dd098SShai Brandes 	ENA_TOUCH(rsp);
608a73dd098SShai Brandes 	ENA_TOUCH(ena_dev);
609a73dd098SShai Brandes 	if ((struct ena_stats_srd *)info != &adapter->srd_stats)
610a73dd098SShai Brandes 		rte_memcpy((struct ena_stats_srd *)info,
611a73dd098SShai Brandes 				&adapter->srd_stats,
612a73dd098SShai Brandes 				sizeof(struct ena_stats_srd));
613a73dd098SShai Brandes }),
614a73dd098SShai Brandes 	struct ena_com_dev *ena_dev, struct ena_admin_ena_srd_info *info);
61592401abfSShai Brandes 
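/* Request a device reset, recording the reason. Only the first request takes
 * effect; later calls made before the reset is serviced are ignored.
 */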
6162bae75eaSDawid Gorecki static inline void ena_trigger_reset(struct ena_adapter *adapter,
6172bae75eaSDawid Gorecki 				     enum ena_regs_reset_reason_types reason)
6182bae75eaSDawid Gorecki {
6192bae75eaSDawid Gorecki 	if (likely(!adapter->trigger_reset)) {
6202bae75eaSDawid Gorecki 		adapter->reset_reason = reason;
6212bae75eaSDawid Gorecki 		adapter->trigger_reset = true;
6222bae75eaSDawid Gorecki 	}
6232bae75eaSDawid Gorecki }
6242bae75eaSDawid Gorecki 
62584daba99SMichal Krawczyk static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
62684daba99SMichal Krawczyk 				       struct rte_mbuf *mbuf,
62734d5e97eSMichal Krawczyk 				       struct ena_com_rx_ctx *ena_rx_ctx,
62834d5e97eSMichal Krawczyk 				       bool fill_hash)
6291173fca2SJan Medala {
63084daba99SMichal Krawczyk 	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
6311173fca2SJan Medala 	uint64_t ol_flags = 0;
632fd617795SRafal Kozik 	uint32_t packet_type = 0;
6331173fca2SJan Medala 
6341173fca2SJan Medala 	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
635fd617795SRafal Kozik 		packet_type |= RTE_PTYPE_L4_TCP;
6361173fca2SJan Medala 	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
637fd617795SRafal Kozik 		packet_type |= RTE_PTYPE_L4_UDP;
6381173fca2SJan Medala 
639856edce2SMichal Krawczyk 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
640fd617795SRafal Kozik 		packet_type |= RTE_PTYPE_L3_IPV4;
64184daba99SMichal Krawczyk 		if (unlikely(ena_rx_ctx->l3_csum_err)) {
64284daba99SMichal Krawczyk 			++rx_stats->l3_csum_bad;
643daa02b5cSOlivier Matz 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
64484daba99SMichal Krawczyk 		} else {
645daa02b5cSOlivier Matz 			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
64684daba99SMichal Krawczyk 		}
647856edce2SMichal Krawczyk 	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
648856edce2SMichal Krawczyk 		packet_type |= RTE_PTYPE_L3_IPV6;
649856edce2SMichal Krawczyk 	}
650856edce2SMichal Krawczyk 
65184daba99SMichal Krawczyk 	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
652daa02b5cSOlivier Matz 		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
65384daba99SMichal Krawczyk 	} else {
65484daba99SMichal Krawczyk 		if (unlikely(ena_rx_ctx->l4_csum_err)) {
65584daba99SMichal Krawczyk 			++rx_stats->l4_csum_bad;
656b2d2f1cfSMichal Krawczyk 			/*
657b2d2f1cfSMichal Krawczyk 			 * For the L4 Rx checksum offload the HW may indicate a
658b2d2f1cfSMichal Krawczyk 			 * bad checksum even though it is valid. Because of that,
659b2d2f1cfSMichal Krawczyk 			 * we're setting the UNKNOWN flag to let the app
660b2d2f1cfSMichal Krawczyk 			 * re-verify the checksum.
661b2d2f1cfSMichal Krawczyk 			 */
662b2d2f1cfSMichal Krawczyk 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
66384daba99SMichal Krawczyk 		} else {
66484daba99SMichal Krawczyk 			++rx_stats->l4_csum_good;
665daa02b5cSOlivier Matz 			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
66684daba99SMichal Krawczyk 		}
66784daba99SMichal Krawczyk 	}
6681173fca2SJan Medala 
66934d5e97eSMichal Krawczyk 	if (fill_hash &&
67034d5e97eSMichal Krawczyk 	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
671daa02b5cSOlivier Matz 		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
672b418f0d2SMichal Krawczyk 		mbuf->hash.rss = ena_rx_ctx->hash;
673b418f0d2SMichal Krawczyk 	}
674b418f0d2SMichal Krawczyk 
6751173fca2SJan Medala 	mbuf->ol_flags = ol_flags;
676fd617795SRafal Kozik 	mbuf->packet_type = packet_type;
6771173fca2SJan Medala }
6781173fca2SJan Medala 
6791173fca2SJan Medala static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
68056b8b9b7SRafal Kozik 				       struct ena_com_tx_ctx *ena_tx_ctx,
68133dde075SMichal Krawczyk 				       uint64_t queue_offloads,
68233dde075SMichal Krawczyk 				       bool disable_meta_caching)
6831173fca2SJan Medala {
6841173fca2SJan Medala 	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
6851173fca2SJan Medala 
68656b8b9b7SRafal Kozik 	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
68756b8b9b7SRafal Kozik 	    (queue_offloads & QUEUE_OFFLOADS)) {
6881173fca2SJan Medala 		/* check if TSO is required */
689daa02b5cSOlivier Matz 		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
690295968d1SFerruh Yigit 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
6911173fca2SJan Medala 			ena_tx_ctx->tso_enable = true;
6921173fca2SJan Medala 
6931173fca2SJan Medala 			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
6941173fca2SJan Medala 		}
6951173fca2SJan Medala 
6961173fca2SJan Medala 		/* check if L3 checksum is needed */
697daa02b5cSOlivier Matz 		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
698295968d1SFerruh Yigit 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
6991173fca2SJan Medala 			ena_tx_ctx->l3_csum_enable = true;
7001173fca2SJan Medala 
701daa02b5cSOlivier Matz 		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
7021173fca2SJan Medala 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
703022fb61bSMichal Krawczyk 			/* For IPv6 packets, DF always needs to be set. */
704022fb61bSMichal Krawczyk 			ena_tx_ctx->df = 1;
7051173fca2SJan Medala 		} else {
7061173fca2SJan Medala 			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
7071173fca2SJan Medala 
7081173fca2SJan Medala 			/* set don't fragment (DF) flag */
7091173fca2SJan Medala 			if (mbuf->packet_type &
7101173fca2SJan Medala 				(RTE_PTYPE_L4_NONFRAG
7111173fca2SJan Medala 				 | RTE_PTYPE_INNER_L4_NONFRAG))
712022fb61bSMichal Krawczyk 				ena_tx_ctx->df = 1;
7131173fca2SJan Medala 		}
7141173fca2SJan Medala 
7151173fca2SJan Medala 		/* check if L4 checksum is needed */
716daa02b5cSOlivier Matz 		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
717295968d1SFerruh Yigit 		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
7181173fca2SJan Medala 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
7191173fca2SJan Medala 			ena_tx_ctx->l4_csum_enable = true;
720daa02b5cSOlivier Matz 		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
721daa02b5cSOlivier Matz 				RTE_MBUF_F_TX_UDP_CKSUM) &&
722295968d1SFerruh Yigit 				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
7231173fca2SJan Medala 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
7241173fca2SJan Medala 			ena_tx_ctx->l4_csum_enable = true;
72556b8b9b7SRafal Kozik 		} else {
7261173fca2SJan Medala 			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
7271173fca2SJan Medala 			ena_tx_ctx->l4_csum_enable = false;
7281173fca2SJan Medala 		}
7291173fca2SJan Medala 
7301173fca2SJan Medala 		ena_meta->mss = mbuf->tso_segsz;
7311173fca2SJan Medala 		ena_meta->l3_hdr_len = mbuf->l3_len;
7321173fca2SJan Medala 		ena_meta->l3_hdr_offset = mbuf->l2_len;
7331173fca2SJan Medala 
7341173fca2SJan Medala 		ena_tx_ctx->meta_valid = true;
73533dde075SMichal Krawczyk 	} else if (disable_meta_caching) {
73633dde075SMichal Krawczyk 		memset(ena_meta, 0, sizeof(*ena_meta));
73733dde075SMichal Krawczyk 		ena_tx_ctx->meta_valid = true;
7381173fca2SJan Medala 	} else {
7391173fca2SJan Medala 		ena_tx_ctx->meta_valid = false;
7401173fca2SJan Medala 	}
7411173fca2SJan Medala }
7421173fca2SJan Medala 
743f7d82d24SRafal Kozik static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
744f7d82d24SRafal Kozik {
745f7d82d24SRafal Kozik 	struct ena_tx_buffer *tx_info = NULL;
746f7d82d24SRafal Kozik 
747f7d82d24SRafal Kozik 	if (likely(req_id < tx_ring->ring_size)) {
748f7d82d24SRafal Kozik 		tx_info = &tx_ring->tx_buffer_info[req_id];
749f7d82d24SRafal Kozik 		if (likely(tx_info->mbuf))
750f7d82d24SRafal Kozik 			return 0;
751f7d82d24SRafal Kozik 	}
752f7d82d24SRafal Kozik 
753f7d82d24SRafal Kozik 	if (tx_info)
75477e764c7SDawid Gorecki 		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf. queue %d:%d req_id %u\n",
75577e764c7SDawid Gorecki 			tx_ring->port_id, tx_ring->id, req_id);
756f7d82d24SRafal Kozik 	else
75777e764c7SDawid Gorecki 		PMD_TX_LOG(ERR, "Invalid req_id: %hu in queue %d:%d\n",
75877e764c7SDawid Gorecki 			req_id, tx_ring->port_id, tx_ring->id);
759f7d82d24SRafal Kozik 
760f7d82d24SRafal Kozik 	/* Trigger device reset */
7617830e905SSolganik Alexander 	++tx_ring->tx_stats.bad_req_id;
7622bae75eaSDawid Gorecki 	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
763f7d82d24SRafal Kozik 	return -EFAULT;
764f7d82d24SRafal Kozik }
765f7d82d24SRafal Kozik 
766372c1af5SJan Medala static void ena_config_host_info(struct ena_com_dev *ena_dev)
767372c1af5SJan Medala {
768372c1af5SJan Medala 	struct ena_admin_host_info *host_info;
769372c1af5SJan Medala 	int rc;
770372c1af5SJan Medala 
771372c1af5SJan Medala 	/* Allocate only the host info */
772372c1af5SJan Medala 	rc = ena_com_allocate_host_info(ena_dev);
773372c1af5SJan Medala 	if (rc) {
7746f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
775372c1af5SJan Medala 		return;
776372c1af5SJan Medala 	}
777372c1af5SJan Medala 
778372c1af5SJan Medala 	host_info = ena_dev->host_attr.host_info;
779372c1af5SJan Medala 
780372c1af5SJan Medala 	host_info->os_type = ENA_ADMIN_OS_DPDK;
781372c1af5SJan Medala 	host_info->kernel_ver = RTE_VERSION;
7826723c0fcSBruce Richardson 	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
7836723c0fcSBruce Richardson 		sizeof(host_info->kernel_ver_str));
784372c1af5SJan Medala 	host_info->os_dist = RTE_VERSION;
7856723c0fcSBruce Richardson 	strlcpy((char *)host_info->os_dist_str, rte_version(),
7866723c0fcSBruce Richardson 		sizeof(host_info->os_dist_str));
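	/* Pack major/minor/subminor into a single 32-bit driver version, e.g.
	 * for 2.8.0: 2 | (8 << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
	 * (0 << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT).
	 */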
787372c1af5SJan Medala 	host_info->driver_version =
788372c1af5SJan Medala 		(DRV_MODULE_VER_MAJOR) |
789372c1af5SJan Medala 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
790c4144557SJan Medala 		(DRV_MODULE_VER_SUBMINOR <<
791c4144557SJan Medala 			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
792b9302eb9SRafal Kozik 	host_info->num_cpus = rte_lcore_count();
793372c1af5SJan Medala 
7947b3a3c4bSMaciej Bielski 	host_info->driver_supported_features =
79534d5e97eSMichal Krawczyk 		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
79634d5e97eSMichal Krawczyk 		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
7977b3a3c4bSMaciej Bielski 
798372c1af5SJan Medala 	rc = ena_com_set_host_attributes(ena_dev);
799372c1af5SJan Medala 	if (rc) {
800241da076SRafal Kozik 		if (rc == -ENA_COM_UNSUPPORTED)
8016f1c9df9SStephen Hemminger 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
802241da076SRafal Kozik 		else
8036f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
804241da076SRafal Kozik 
805372c1af5SJan Medala 		goto err;
806372c1af5SJan Medala 	}
807372c1af5SJan Medala 
808372c1af5SJan Medala 	return;
809372c1af5SJan Medala 
810372c1af5SJan Medala err:
811372c1af5SJan Medala 	ena_com_delete_host_info(ena_dev);
812372c1af5SJan Medala }
813372c1af5SJan Medala 
8147830e905SSolganik Alexander /* This function calculates the number of xstats based on the current config */
815aab58857SStanislaw Kardach static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
816372c1af5SJan Medala {
81792401abfSShai Brandes 	struct ena_adapter *adapter = data->dev_private;
81892401abfSShai Brandes 
81992401abfSShai Brandes 	return ENA_STATS_ARRAY_GLOBAL +
82092401abfSShai Brandes 		adapter->metrics_num +
821a73dd098SShai Brandes 		ENA_STATS_ARRAY_ENA_SRD +
822aab58857SStanislaw Kardach 		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
823aab58857SStanislaw Kardach 		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
824372c1af5SJan Medala }
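/* E.g. with the tables above: 5 global stats + metrics_num (5 legacy or 6
 * reworked) + 5 ENA SRD stats, plus 8 per Tx queue and 9 per Rx queue.
 */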
825372c1af5SJan Medala 
826372c1af5SJan Medala static void ena_config_debug_area(struct ena_adapter *adapter)
827372c1af5SJan Medala {
828372c1af5SJan Medala 	u32 debug_area_size;
829372c1af5SJan Medala 	int rc, ss_count;
830372c1af5SJan Medala 
831aab58857SStanislaw Kardach 	ss_count = ena_xstats_calc_num(adapter->edev_data);
832372c1af5SJan Medala 
833372c1af5SJan Medala 	/* Allocate 32 bytes for each stat name string and 64 bits for each value. */
834372c1af5SJan Medala 	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
835372c1af5SJan Medala 
836372c1af5SJan Medala 	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
837372c1af5SJan Medala 	if (rc) {
8386f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
839372c1af5SJan Medala 		return;
840372c1af5SJan Medala 	}
841372c1af5SJan Medala 
842372c1af5SJan Medala 	rc = ena_com_set_host_attributes(&adapter->ena_dev);
843372c1af5SJan Medala 	if (rc) {
844241da076SRafal Kozik 		if (rc == -ENA_COM_UNSUPPORTED)
8456f1c9df9SStephen Hemminger 			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
846241da076SRafal Kozik 		else
8476f1c9df9SStephen Hemminger 			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");
848241da076SRafal Kozik 
849372c1af5SJan Medala 		goto err;
850372c1af5SJan Medala 	}
851372c1af5SJan Medala 
852372c1af5SJan Medala 	return;
853372c1af5SJan Medala err:
854372c1af5SJan Medala 	ena_com_delete_debug_area(&adapter->ena_dev);
855372c1af5SJan Medala }
856372c1af5SJan Medala 
857b142387bSThomas Monjalon static int ena_close(struct rte_eth_dev *dev)
8581173fca2SJan Medala {
8594d7877fdSMichal Krawczyk 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
860d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
861890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
86262024eb8SIvan Ilchenko 	int ret = 0;
8631173fca2SJan Medala 
86430410493SThomas Monjalon 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
86530410493SThomas Monjalon 		return 0;
86630410493SThomas Monjalon 
867df238f84SMichal Krawczyk 	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
86862024eb8SIvan Ilchenko 		ret = ena_stop(dev);
869eb0ef49dSMichal Krawczyk 	adapter->state = ENA_ADAPTER_STATE_CLOSED;
87015773e06SMichal Krawczyk 
8711173fca2SJan Medala 	ena_rx_queue_release_all(dev);
8721173fca2SJan Medala 	ena_tx_queue_release_all(dev);
8734d7877fdSMichal Krawczyk 
8744d7877fdSMichal Krawczyk 	rte_free(adapter->drv_stats);
8754d7877fdSMichal Krawczyk 	adapter->drv_stats = NULL;
8764d7877fdSMichal Krawczyk 
8774d7877fdSMichal Krawczyk 	rte_intr_disable(intr_handle);
8784d7877fdSMichal Krawczyk 	rte_intr_callback_unregister(intr_handle,
8794d7877fdSMichal Krawczyk 				     ena_interrupt_handler_rte,
880aab58857SStanislaw Kardach 				     dev);
8814d7877fdSMichal Krawczyk 
8824d7877fdSMichal Krawczyk 	/*
8834d7877fdSMichal Krawczyk 	 * The MAC address is not allocated dynamically. Setting it to NULL
8844d7877fdSMichal Krawczyk 	 * prevents rte_eth_dev_release_port() from trying to release it.
8854d7877fdSMichal Krawczyk 	 */
8864d7877fdSMichal Krawczyk 	dev->data->mac_addrs = NULL;
887b142387bSThomas Monjalon 
88862024eb8SIvan Ilchenko 	return ret;
8891173fca2SJan Medala }
8901173fca2SJan Medala 
8912081d5e2SMichal Krawczyk static int
8922081d5e2SMichal Krawczyk ena_dev_reset(struct rte_eth_dev *dev)
8932081d5e2SMichal Krawczyk {
894e457bc70SRafal Kozik 	int rc = 0;
8952081d5e2SMichal Krawczyk 
89639ecdd3dSStanislaw Kardach 	/* Cannot release memory in secondary process */
89739ecdd3dSStanislaw Kardach 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
89839ecdd3dSStanislaw Kardach 		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
89939ecdd3dSStanislaw Kardach 		return -EPERM;
90039ecdd3dSStanislaw Kardach 	}
90139ecdd3dSStanislaw Kardach 
902e457bc70SRafal Kozik 	ena_destroy_device(dev);
903e457bc70SRafal Kozik 	rc = eth_ena_dev_init(dev);
904241da076SRafal Kozik 	if (rc)
905617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
906e457bc70SRafal Kozik 
9072081d5e2SMichal Krawczyk 	return rc;
9082081d5e2SMichal Krawczyk }
9092081d5e2SMichal Krawczyk 
9101173fca2SJan Medala static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
9111173fca2SJan Medala {
9121173fca2SJan Medala 	int nb_queues = dev->data->nb_rx_queues;
9131173fca2SJan Medala 	int i;
9141173fca2SJan Medala 
9151173fca2SJan Medala 	for (i = 0; i < nb_queues; i++)
9167483341aSXueming Li 		ena_rx_queue_release(dev, i);
9171173fca2SJan Medala }
9181173fca2SJan Medala 
9191173fca2SJan Medala static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
9201173fca2SJan Medala {
9211173fca2SJan Medala 	int nb_queues = dev->data->nb_tx_queues;
9221173fca2SJan Medala 	int i;
9231173fca2SJan Medala 
9241173fca2SJan Medala 	for (i = 0; i < nb_queues; i++)
9257483341aSXueming Li 		ena_tx_queue_release(dev, i);
9261173fca2SJan Medala }
9271173fca2SJan Medala 
9287483341aSXueming Li static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
9291173fca2SJan Medala {
9307483341aSXueming Li 	struct ena_ring *ring = dev->data->rx_queues[qid];
9311173fca2SJan Medala 
9321173fca2SJan Medala 	/* Free ring resources */
9331173fca2SJan Medala 	rte_free(ring->rx_buffer_info);
9341173fca2SJan Medala 	ring->rx_buffer_info = NULL;
9351173fca2SJan Medala 
93679405ee1SRafal Kozik 	rte_free(ring->rx_refill_buffer);
93779405ee1SRafal Kozik 	ring->rx_refill_buffer = NULL;
93879405ee1SRafal Kozik 
939c2034976SMichal Krawczyk 	rte_free(ring->empty_rx_reqs);
940c2034976SMichal Krawczyk 	ring->empty_rx_reqs = NULL;
941c2034976SMichal Krawczyk 
9421173fca2SJan Medala 	ring->configured = 0;
9431173fca2SJan Medala 
944617898d1SMichal Krawczyk 	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
9451173fca2SJan Medala 		ring->port_id, ring->id);
9461173fca2SJan Medala }
9471173fca2SJan Medala 
9487483341aSXueming Li static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
9491173fca2SJan Medala {
9507483341aSXueming Li 	struct ena_ring *ring = dev->data->tx_queues[qid];
9511173fca2SJan Medala 
9521173fca2SJan Medala 	/* Free ring resources */
9532fca2a98SMichal Krawczyk 	rte_free(ring->push_buf_intermediate_buf);
9542fca2a98SMichal Krawczyk 
9551173fca2SJan Medala 	rte_free(ring->tx_buffer_info);
9561173fca2SJan Medala 
9571173fca2SJan Medala 	rte_free(ring->empty_tx_reqs);
9581173fca2SJan Medala 
9591173fca2SJan Medala 	ring->empty_tx_reqs = NULL;
9601173fca2SJan Medala 	ring->tx_buffer_info = NULL;
9612fca2a98SMichal Krawczyk 	ring->push_buf_intermediate_buf = NULL;
9621173fca2SJan Medala 
9631173fca2SJan Medala 	ring->configured = 0;
9641173fca2SJan Medala 
965617898d1SMichal Krawczyk 	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
9661173fca2SJan Medala 		ring->port_id, ring->id);
9671173fca2SJan Medala }
9681173fca2SJan Medala 
9691173fca2SJan Medala static void ena_rx_queue_release_bufs(struct ena_ring *ring)
9701173fca2SJan Medala {
971709b1dcbSRafal Kozik 	unsigned int i;
9721173fca2SJan Medala 
9731be097dcSMichal Krawczyk 	for (i = 0; i < ring->ring_size; ++i) {
9741be097dcSMichal Krawczyk 		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
9751be097dcSMichal Krawczyk 		if (rx_info->mbuf) {
9761be097dcSMichal Krawczyk 			rte_mbuf_raw_free(rx_info->mbuf);
9771be097dcSMichal Krawczyk 			rx_info->mbuf = NULL;
9781be097dcSMichal Krawczyk 		}
9791173fca2SJan Medala 	}
9801173fca2SJan Medala }
9811173fca2SJan Medala 
9821173fca2SJan Medala static void ena_tx_queue_release_bufs(struct ena_ring *ring)
9831173fca2SJan Medala {
984207a514cSMichal Krawczyk 	unsigned int i;
9851173fca2SJan Medala 
986207a514cSMichal Krawczyk 	for (i = 0; i < ring->ring_size; ++i) {
987207a514cSMichal Krawczyk 		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
9881173fca2SJan Medala 
9893c8bc29fSDavid Harton 		if (tx_buf->mbuf) {
9901173fca2SJan Medala 			rte_pktmbuf_free(tx_buf->mbuf);
9913c8bc29fSDavid Harton 			tx_buf->mbuf = NULL;
9923c8bc29fSDavid Harton 		}
9931173fca2SJan Medala 	}
9941173fca2SJan Medala }
9951173fca2SJan Medala 
9961173fca2SJan Medala static int ena_link_update(struct rte_eth_dev *dev,
9971173fca2SJan Medala 			   __rte_unused int wait_to_complete)
9981173fca2SJan Medala {
9991173fca2SJan Medala 	struct rte_eth_link *link = &dev->data->dev_link;
1000890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1001ca148440SMichal Krawczyk 
1002295968d1SFerruh Yigit 	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1003295968d1SFerruh Yigit 	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
1004295968d1SFerruh Yigit 	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
10051173fca2SJan Medala 
10061173fca2SJan Medala 	return 0;
10071173fca2SJan Medala }
10081173fca2SJan Medala 
100926e5543dSRafal Kozik static int ena_queue_start_all(struct rte_eth_dev *dev,
10101173fca2SJan Medala 			       enum ena_ring_type ring_type)
10111173fca2SJan Medala {
1012890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
10131173fca2SJan Medala 	struct ena_ring *queues = NULL;
101453b61841SMichal Krawczyk 	int nb_queues;
10151173fca2SJan Medala 	int i = 0;
10161173fca2SJan Medala 	int rc = 0;
10171173fca2SJan Medala 
101853b61841SMichal Krawczyk 	if (ring_type == ENA_RING_TYPE_RX) {
101953b61841SMichal Krawczyk 		queues = adapter->rx_ring;
102053b61841SMichal Krawczyk 		nb_queues = dev->data->nb_rx_queues;
102153b61841SMichal Krawczyk 	} else {
102253b61841SMichal Krawczyk 		queues = adapter->tx_ring;
102353b61841SMichal Krawczyk 		nb_queues = dev->data->nb_tx_queues;
102453b61841SMichal Krawczyk 	}
102553b61841SMichal Krawczyk 	for (i = 0; i < nb_queues; i++) {
10261173fca2SJan Medala 		if (queues[i].configured) {
10271173fca2SJan Medala 			if (ring_type == ENA_RING_TYPE_RX) {
10281173fca2SJan Medala 				ena_assert_msg(
10291173fca2SJan Medala 					dev->data->rx_queues[i] == &queues[i],
1030617898d1SMichal Krawczyk 					"Inconsistent state of Rx queues\n");
10311173fca2SJan Medala 			} else {
10321173fca2SJan Medala 				ena_assert_msg(
10331173fca2SJan Medala 					dev->data->tx_queues[i] == &queues[i],
1034617898d1SMichal Krawczyk 					"Inconsistent state of Tx queues\n");
10351173fca2SJan Medala 			}
10361173fca2SJan Medala 
10376986cdc4SMichal Krawczyk 			rc = ena_queue_start(dev, &queues[i]);
10381173fca2SJan Medala 
10391173fca2SJan Medala 			if (rc) {
10401173fca2SJan Medala 				PMD_INIT_LOG(ERR,
1041617898d1SMichal Krawczyk 					"Failed to start queue[%d] of type(%d)\n",
10421173fca2SJan Medala 					i, ring_type);
104326e5543dSRafal Kozik 				goto err;
10441173fca2SJan Medala 			}
10451173fca2SJan Medala 		}
10461173fca2SJan Medala 	}
10471173fca2SJan Medala 
10481173fca2SJan Medala 	return 0;
104926e5543dSRafal Kozik 
105026e5543dSRafal Kozik err:
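	/* Roll back the queues that were already started, in reverse order. */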
105126e5543dSRafal Kozik 	while (i--)
105226e5543dSRafal Kozik 		if (queues[i].configured)
105326e5543dSRafal Kozik 			ena_queue_stop(&queues[i]);
105426e5543dSRafal Kozik 
105526e5543dSRafal Kozik 	return rc;
10561173fca2SJan Medala }
10571173fca2SJan Medala 
10581173fca2SJan Medala static int
10598a7a73f2SMichal Krawczyk ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
10608a7a73f2SMichal Krawczyk 		       bool use_large_llq_hdr)
10611173fca2SJan Medala {
10622fca2a98SMichal Krawczyk 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
10632fca2a98SMichal Krawczyk 	struct ena_com_dev *ena_dev = ctx->ena_dev;
10645920d930SMichal Krawczyk 	uint32_t max_tx_queue_size;
10655920d930SMichal Krawczyk 	uint32_t max_rx_queue_size;
10661173fca2SJan Medala 
10672fca2a98SMichal Krawczyk 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1068ea93d37eSRafal Kozik 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
1069ea93d37eSRafal Kozik 			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
10705920d930SMichal Krawczyk 		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
1071ea93d37eSRafal Kozik 			max_queue_ext->max_rx_sq_depth);
10725920d930SMichal Krawczyk 		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
10732fca2a98SMichal Krawczyk 
10742fca2a98SMichal Krawczyk 		if (ena_dev->tx_mem_queue_type ==
10752fca2a98SMichal Krawczyk 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
10765920d930SMichal Krawczyk 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
10772fca2a98SMichal Krawczyk 				llq->max_llq_depth);
10782fca2a98SMichal Krawczyk 		} else {
10795920d930SMichal Krawczyk 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
1080ea93d37eSRafal Kozik 				max_queue_ext->max_tx_sq_depth);
10812fca2a98SMichal Krawczyk 		}
10822fca2a98SMichal Krawczyk 
1083ea93d37eSRafal Kozik 		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1084ea93d37eSRafal Kozik 			max_queue_ext->max_per_packet_rx_descs);
1085ea93d37eSRafal Kozik 		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1086ea93d37eSRafal Kozik 			max_queue_ext->max_per_packet_tx_descs);
1087ea93d37eSRafal Kozik 	} else {
1088ea93d37eSRafal Kozik 		struct ena_admin_queue_feature_desc *max_queues =
1089ea93d37eSRafal Kozik 			&ctx->get_feat_ctx->max_queues;
10905920d930SMichal Krawczyk 		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
1091ea93d37eSRafal Kozik 			max_queues->max_sq_depth);
10925920d930SMichal Krawczyk 		max_tx_queue_size = max_queues->max_cq_depth;
10932fca2a98SMichal Krawczyk 
10942fca2a98SMichal Krawczyk 		if (ena_dev->tx_mem_queue_type ==
10952fca2a98SMichal Krawczyk 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
10965920d930SMichal Krawczyk 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
10972fca2a98SMichal Krawczyk 				llq->max_llq_depth);
10982fca2a98SMichal Krawczyk 		} else {
10995920d930SMichal Krawczyk 			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
11002fca2a98SMichal Krawczyk 				max_queues->max_sq_depth);
11012fca2a98SMichal Krawczyk 		}
11022fca2a98SMichal Krawczyk 
1103ea93d37eSRafal Kozik 		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
1104ea93d37eSRafal Kozik 			max_queues->max_packet_rx_descs);
11055920d930SMichal Krawczyk 		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
11065920d930SMichal Krawczyk 			max_queues->max_packet_tx_descs);
1107ea93d37eSRafal Kozik 	}
11081173fca2SJan Medala 
1109ea93d37eSRafal Kozik 	/* Round down to the nearest power of 2 */
11105920d930SMichal Krawczyk 	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
11115920d930SMichal Krawczyk 	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);
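	/*
	 * Illustrative only (not part of the driver): rte_align32prevpow2()
	 * returns the greatest power of two that is <= its argument, so a
	 * device advertising a maximum depth of 1000 ends up with a queue
	 * size of 512, while an already-aligned 1024 stays 1024.
	 */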
11121173fca2SJan Medala 
11138a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr) {
11148a7a73f2SMichal Krawczyk 		if ((llq->entry_size_ctrl_supported &
11158a7a73f2SMichal Krawczyk 		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
11168a7a73f2SMichal Krawczyk 		    (ena_dev->tx_mem_queue_type ==
11178a7a73f2SMichal Krawczyk 		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
11188a7a73f2SMichal Krawczyk 			max_tx_queue_size /= 2;
11198a7a73f2SMichal Krawczyk 			PMD_INIT_LOG(INFO,
1120617898d1SMichal Krawczyk 				"Forcing large headers and decreasing maximum Tx queue size to %d\n",
11218a7a73f2SMichal Krawczyk 				max_tx_queue_size);
11228a7a73f2SMichal Krawczyk 		} else {
11238a7a73f2SMichal Krawczyk 			PMD_INIT_LOG(ERR,
11248a7a73f2SMichal Krawczyk 				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
11258a7a73f2SMichal Krawczyk 		}
11268a7a73f2SMichal Krawczyk 	}
11278a7a73f2SMichal Krawczyk 
11285920d930SMichal Krawczyk 	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
1129617898d1SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid queue size\n");
11301173fca2SJan Medala 		return -EFAULT;
11311173fca2SJan Medala 	}
11321173fca2SJan Medala 
11335920d930SMichal Krawczyk 	ctx->max_tx_queue_size = max_tx_queue_size;
11345920d930SMichal Krawczyk 	ctx->max_rx_queue_size = max_rx_queue_size;
11352061fe41SRafal Kozik 
1136ea93d37eSRafal Kozik 	return 0;
11371173fca2SJan Medala }
11381173fca2SJan Medala 
11391173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev)
11401173fca2SJan Medala {
1141890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
11421173fca2SJan Medala 
11431173fca2SJan Medala 	rte_atomic64_init(&adapter->drv_stats->ierrors);
11441173fca2SJan Medala 	rte_atomic64_init(&adapter->drv_stats->oerrors);
11451173fca2SJan Medala 	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
1146e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = 0;
11471173fca2SJan Medala }
11481173fca2SJan Medala 
1149d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev,
11501173fca2SJan Medala 			  struct rte_eth_stats *stats)
11511173fca2SJan Medala {
11521173fca2SJan Medala 	struct ena_admin_basic_stats ena_stats;
1153890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
11541173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
11551173fca2SJan Medala 	int rc;
115645b6d861SMichal Krawczyk 	int i;
115745b6d861SMichal Krawczyk 	int max_rings_stats;
11581173fca2SJan Medala 
11591173fca2SJan Medala 	memset(&ena_stats, 0, sizeof(ena_stats));
11601343c415SMichal Krawczyk 
11611343c415SMichal Krawczyk 	rte_spinlock_lock(&adapter->admin_lock);
1162e3595539SStanislaw Kardach 	rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev,
1163e3595539SStanislaw Kardach 		       &ena_stats);
11641343c415SMichal Krawczyk 	rte_spinlock_unlock(&adapter->admin_lock);
11651173fca2SJan Medala 	if (unlikely(rc)) {
11666f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
1167d5b0924bSMatan Azrad 		return rc;
11681173fca2SJan Medala 	}
11691173fca2SJan Medala 
11701173fca2SJan Medala 	/* Set of basic statistics from ENA */
11711173fca2SJan Medala 	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
11721173fca2SJan Medala 					  ena_stats.rx_pkts_low);
11731173fca2SJan Medala 	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
11741173fca2SJan Medala 					  ena_stats.tx_pkts_low);
11751173fca2SJan Medala 	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
11761173fca2SJan Medala 					ena_stats.rx_bytes_low);
11771173fca2SJan Medala 	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
11781173fca2SJan Medala 					ena_stats.tx_bytes_low);
11791173fca2SJan Medala 
11801173fca2SJan Medala 	/* Driver related stats */
1181e1e73e32SMichal Krawczyk 	stats->imissed = adapter->drv_stats->rx_drops;
11821173fca2SJan Medala 	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
11831173fca2SJan Medala 	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
11841173fca2SJan Medala 	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
118545b6d861SMichal Krawczyk 
118645b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
118745b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
118845b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
118945b6d861SMichal Krawczyk 		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;
119045b6d861SMichal Krawczyk 
119145b6d861SMichal Krawczyk 		stats->q_ibytes[i] = rx_stats->bytes;
119245b6d861SMichal Krawczyk 		stats->q_ipackets[i] = rx_stats->cnt;
119345b6d861SMichal Krawczyk 		stats->q_errors[i] = rx_stats->bad_desc_num +
119445b6d861SMichal Krawczyk 			rx_stats->bad_req_id;
119545b6d861SMichal Krawczyk 	}
119645b6d861SMichal Krawczyk 
119745b6d861SMichal Krawczyk 	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
119845b6d861SMichal Krawczyk 		RTE_ETHDEV_QUEUE_STAT_CNTRS);
119945b6d861SMichal Krawczyk 	for (i = 0; i < max_rings_stats; ++i) {
120045b6d861SMichal Krawczyk 		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;
120145b6d861SMichal Krawczyk 
120245b6d861SMichal Krawczyk 		stats->q_obytes[i] = tx_stats->bytes;
120345b6d861SMichal Krawczyk 		stats->q_opackets[i] = tx_stats->cnt;
120445b6d861SMichal Krawczyk 	}
120545b6d861SMichal Krawczyk 
1206d5b0924bSMatan Azrad 	return 0;
12071173fca2SJan Medala }
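
/*
 * Usage sketch (illustrative, not part of the driver): an application
 * reaches the handler above through the generic ethdev API, e.g.:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx: %" PRIu64 ", missed: %" PRIu64 "\n",
 *		       stats.ipackets, stats.imissed);
 */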
12081173fca2SJan Medala 
12091173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
12101173fca2SJan Medala {
12111173fca2SJan Medala 	struct ena_adapter *adapter;
12121173fca2SJan Medala 	struct ena_com_dev *ena_dev;
12131173fca2SJan Medala 	int rc = 0;
12141173fca2SJan Medala 
1215498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
1216498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
1217890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
12181173fca2SJan Medala 
12191173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
1220498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
12211173fca2SJan Medala 
1222e3595539SStanislaw Kardach 	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
12231173fca2SJan Medala 	if (rc)
12246f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
12251173fca2SJan Medala 	else
1226617898d1SMichal Krawczyk 		PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu);
12271173fca2SJan Medala 
12281173fca2SJan Medala 	return rc;
12291173fca2SJan Medala }
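
/*
 * Usage sketch (illustrative): the handler above is invoked via the
 * generic ethdev API, e.g. rte_eth_dev_set_mtu(port_id, 9000) when
 * enabling jumbo frames, assuming the Rx buffers are large enough.
 */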
12301173fca2SJan Medala 
12311173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev)
12321173fca2SJan Medala {
1233890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1234d9b8b106SMichal Krawczyk 	uint64_t ticks;
12351173fca2SJan Medala 	int rc = 0;
12369210f0caSJie Hai 	uint16_t i;
12371173fca2SJan Medala 
123839ecdd3dSStanislaw Kardach 	/* Cannot allocate memory in secondary process */
123939ecdd3dSStanislaw Kardach 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
124039ecdd3dSStanislaw Kardach 		PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n");
124139ecdd3dSStanislaw Kardach 		return -EPERM;
124239ecdd3dSStanislaw Kardach 	}
124339ecdd3dSStanislaw Kardach 
12446986cdc4SMichal Krawczyk 	rc = ena_setup_rx_intr(dev);
12456986cdc4SMichal Krawczyk 	if (rc)
12466986cdc4SMichal Krawczyk 		return rc;
12476986cdc4SMichal Krawczyk 
124826e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
12491173fca2SJan Medala 	if (rc)
12501173fca2SJan Medala 		return rc;
12511173fca2SJan Medala 
125226e5543dSRafal Kozik 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
12531173fca2SJan Medala 	if (rc)
125426e5543dSRafal Kozik 		goto err_start_tx;
12551173fca2SJan Medala 
1256295968d1SFerruh Yigit 	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
125734d5e97eSMichal Krawczyk 		rc = ena_rss_configure(adapter);
12581173fca2SJan Medala 		if (rc)
125926e5543dSRafal Kozik 			goto err_rss_init;
12601173fca2SJan Medala 	}
12611173fca2SJan Medala 
12621173fca2SJan Medala 	ena_stats_restart(dev);
12631173fca2SJan Medala 
1264d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
1265d9b8b106SMichal Krawczyk 	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
1266d9b8b106SMichal Krawczyk 
1267d9b8b106SMichal Krawczyk 	ticks = rte_get_timer_hz();
1268d9b8b106SMichal Krawczyk 	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
1269aab58857SStanislaw Kardach 			ena_timer_wd_callback, dev);
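	/*
	 * Note (descriptive): with the period set to rte_get_timer_hz(),
	 * the PERIODICAL watchdog fires roughly once per second, whenever
	 * rte_timer_manage() runs on this lcore.
	 */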
1270d9b8b106SMichal Krawczyk 
12717830e905SSolganik Alexander 	++adapter->dev_stats.dev_start;
12721173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_RUNNING;
12731173fca2SJan Medala 
12749210f0caSJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
12759210f0caSJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
12769210f0caSJie Hai 	for (i = 0; i < dev->data->nb_tx_queues; i++)
12779210f0caSJie Hai 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
12789210f0caSJie Hai 
12791173fca2SJan Medala 	return 0;
128026e5543dSRafal Kozik 
128126e5543dSRafal Kozik err_rss_init:
128226e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
128326e5543dSRafal Kozik err_start_tx:
128426e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
128526e5543dSRafal Kozik 	return rc;
12861173fca2SJan Medala }
12871173fca2SJan Medala 
128862024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev)
1289eb0ef49dSMichal Krawczyk {
1290890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
1291e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
12926986cdc4SMichal Krawczyk 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1293d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
12949210f0caSJie Hai 	uint16_t i;
1295e457bc70SRafal Kozik 	int rc;
1296eb0ef49dSMichal Krawczyk 
129739ecdd3dSStanislaw Kardach 	/* Cannot free memory in secondary process */
129839ecdd3dSStanislaw Kardach 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
129939ecdd3dSStanislaw Kardach 		PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n");
130039ecdd3dSStanislaw Kardach 		return -EPERM;
130139ecdd3dSStanislaw Kardach 	}
130239ecdd3dSStanislaw Kardach 
1303d9b8b106SMichal Krawczyk 	rte_timer_stop_sync(&adapter->timer_wd);
130426e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
130526e5543dSRafal Kozik 	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
1306d9b8b106SMichal Krawczyk 
1307e457bc70SRafal Kozik 	if (adapter->trigger_reset) {
1308e457bc70SRafal Kozik 		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
1309e457bc70SRafal Kozik 		if (rc)
1310617898d1SMichal Krawczyk 			PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
1311e457bc70SRafal Kozik 	}
1312e457bc70SRafal Kozik 
13136986cdc4SMichal Krawczyk 	rte_intr_disable(intr_handle);
13146986cdc4SMichal Krawczyk 
13156986cdc4SMichal Krawczyk 	rte_intr_efd_disable(intr_handle);
1316d61138d4SHarman Kalra 
1317d61138d4SHarman Kalra 	/* Cleanup vector list */
1318d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
13196986cdc4SMichal Krawczyk 
13206986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
13216986cdc4SMichal Krawczyk 
13227830e905SSolganik Alexander 	++adapter->dev_stats.dev_stop;
1323eb0ef49dSMichal Krawczyk 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
1324b8f5d2aeSThomas Monjalon 	dev->data->dev_started = 0;
132562024eb8SIvan Ilchenko 
13269210f0caSJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
13279210f0caSJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
13289210f0caSJie Hai 	for (i = 0; i < dev->data->nb_tx_queues; i++)
13299210f0caSJie Hai 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
13309210f0caSJie Hai 
133162024eb8SIvan Ilchenko 	return 0;
1332eb0ef49dSMichal Krawczyk }
1333eb0ef49dSMichal Krawczyk 
13346986cdc4SMichal Krawczyk static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
1335df238f84SMichal Krawczyk {
13366986cdc4SMichal Krawczyk 	struct ena_adapter *adapter = ring->adapter;
13376986cdc4SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
13386986cdc4SMichal Krawczyk 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1339d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1340df238f84SMichal Krawczyk 	struct ena_com_create_io_ctx ctx =
1341df238f84SMichal Krawczyk 		/* policy set to _HOST just to satisfy icc compiler */
1342df238f84SMichal Krawczyk 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
1343df238f84SMichal Krawczyk 		  0, 0, 0, 0, 0 };
1344df238f84SMichal Krawczyk 	uint16_t ena_qid;
1345778677dcSRafal Kozik 	unsigned int i;
1346df238f84SMichal Krawczyk 	int rc;
1347df238f84SMichal Krawczyk 
13486986cdc4SMichal Krawczyk 	ctx.msix_vector = -1;
1349df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX) {
1350df238f84SMichal Krawczyk 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
1351df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1352df238f84SMichal Krawczyk 		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1353778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1354778677dcSRafal Kozik 			ring->empty_tx_reqs[i] = i;
1355df238f84SMichal Krawczyk 	} else {
1356df238f84SMichal Krawczyk 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
1357df238f84SMichal Krawczyk 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
13586986cdc4SMichal Krawczyk 		if (rte_intr_dp_is_en(intr_handle))
1359d61138d4SHarman Kalra 			ctx.msix_vector =
1360d61138d4SHarman Kalra 				rte_intr_vec_list_index_get(intr_handle,
1361d61138d4SHarman Kalra 							    ring->id);
1362d61138d4SHarman Kalra 
1363778677dcSRafal Kozik 		for (i = 0; i < ring->ring_size; i++)
1364778677dcSRafal Kozik 			ring->empty_rx_reqs[i] = i;
1365df238f84SMichal Krawczyk 	}
1366badc3a6aSMichal Krawczyk 	ctx.queue_size = ring->ring_size;
1367df238f84SMichal Krawczyk 	ctx.qid = ena_qid;
13684217cb0bSMichal Krawczyk 	ctx.numa_node = ring->numa_socket_id;
1369df238f84SMichal Krawczyk 
1370df238f84SMichal Krawczyk 	rc = ena_com_create_io_queue(ena_dev, &ctx);
1371df238f84SMichal Krawczyk 	if (rc) {
13726f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1373617898d1SMichal Krawczyk 			"Failed to create IO queue[%d] (qid:%d), rc: %d\n",
1374df238f84SMichal Krawczyk 			ring->id, ena_qid, rc);
1375df238f84SMichal Krawczyk 		return rc;
1376df238f84SMichal Krawczyk 	}
1377df238f84SMichal Krawczyk 
1378df238f84SMichal Krawczyk 	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1379df238f84SMichal Krawczyk 				     &ring->ena_com_io_sq,
1380df238f84SMichal Krawczyk 				     &ring->ena_com_io_cq);
1381df238f84SMichal Krawczyk 	if (rc) {
13826f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1383617898d1SMichal Krawczyk 			"Failed to get IO queue[%d] handlers, rc: %d\n",
1384df238f84SMichal Krawczyk 			ring->id, rc);
1385df238f84SMichal Krawczyk 		ena_com_destroy_io_queue(ena_dev, ena_qid);
1386df238f84SMichal Krawczyk 		return rc;
1387df238f84SMichal Krawczyk 	}
1388df238f84SMichal Krawczyk 
1389df238f84SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_TX)
1390df238f84SMichal Krawczyk 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1391df238f84SMichal Krawczyk 
13926986cdc4SMichal Krawczyk 	/* Start with Rx interrupts being masked. */
13936986cdc4SMichal Krawczyk 	if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle))
13946986cdc4SMichal Krawczyk 		ena_rx_queue_intr_disable(dev, ring->id);
13956986cdc4SMichal Krawczyk 
1396df238f84SMichal Krawczyk 	return 0;
1397df238f84SMichal Krawczyk }
1398df238f84SMichal Krawczyk 
139926e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring)
1400df238f84SMichal Krawczyk {
140126e5543dSRafal Kozik 	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1402df238f84SMichal Krawczyk 
140326e5543dSRafal Kozik 	if (ring->type == ENA_RING_TYPE_RX) {
140426e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
140526e5543dSRafal Kozik 		ena_rx_queue_release_bufs(ring);
140626e5543dSRafal Kozik 	} else {
140726e5543dSRafal Kozik 		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
140826e5543dSRafal Kozik 		ena_tx_queue_release_bufs(ring);
1409df238f84SMichal Krawczyk 	}
1410df238f84SMichal Krawczyk }
1411df238f84SMichal Krawczyk 
141226e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev,
141326e5543dSRafal Kozik 			      enum ena_ring_type ring_type)
141426e5543dSRafal Kozik {
1415890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
141626e5543dSRafal Kozik 	struct ena_ring *queues = NULL;
141726e5543dSRafal Kozik 	uint16_t nb_queues, i;
141826e5543dSRafal Kozik 
141926e5543dSRafal Kozik 	if (ring_type == ENA_RING_TYPE_RX) {
142026e5543dSRafal Kozik 		queues = adapter->rx_ring;
142126e5543dSRafal Kozik 		nb_queues = dev->data->nb_rx_queues;
142226e5543dSRafal Kozik 	} else {
142326e5543dSRafal Kozik 		queues = adapter->tx_ring;
142426e5543dSRafal Kozik 		nb_queues = dev->data->nb_tx_queues;
142526e5543dSRafal Kozik 	}
142626e5543dSRafal Kozik 
142726e5543dSRafal Kozik 	for (i = 0; i < nb_queues; ++i)
142826e5543dSRafal Kozik 		if (queues[i].configured)
142926e5543dSRafal Kozik 			ena_queue_stop(&queues[i]);
143026e5543dSRafal Kozik }
143126e5543dSRafal Kozik 
14326986cdc4SMichal Krawczyk static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
14331173fca2SJan Medala {
1434a467e8f3SMichal Krawczyk 	int rc, bufs_num;
14351173fca2SJan Medala 
14361173fca2SJan Medala 	ena_assert_msg(ring->configured == 1,
143726e5543dSRafal Kozik 		       "Trying to start unconfigured queue\n");
14381173fca2SJan Medala 
14396986cdc4SMichal Krawczyk 	rc = ena_create_io_queue(dev, ring);
1440df238f84SMichal Krawczyk 	if (rc) {
1441617898d1SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Failed to create IO queue\n");
1442df238f84SMichal Krawczyk 		return rc;
1443df238f84SMichal Krawczyk 	}
1444df238f84SMichal Krawczyk 
14451173fca2SJan Medala 	ring->next_to_clean = 0;
14461173fca2SJan Medala 	ring->next_to_use = 0;
14471173fca2SJan Medala 
14487830e905SSolganik Alexander 	if (ring->type == ENA_RING_TYPE_TX) {
14497830e905SSolganik Alexander 		ring->tx_stats.available_desc =
1450b2b02edeSMichal Krawczyk 			ena_com_free_q_entries(ring->ena_com_io_sq);
14511173fca2SJan Medala 		return 0;
14527830e905SSolganik Alexander 	}
14531173fca2SJan Medala 
1454a467e8f3SMichal Krawczyk 	bufs_num = ring->ring_size - 1;
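	/* One descriptor is left unused; keeping a single slot free is the
	 * usual ring convention for telling a full ring apart from an empty
	 * one (descriptive assumption, not stated in the original).
	 */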
1455a467e8f3SMichal Krawczyk 	rc = ena_populate_rx_queue(ring, bufs_num);
1456a467e8f3SMichal Krawczyk 	if (rc != bufs_num) {
145726e5543dSRafal Kozik 		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
145826e5543dSRafal Kozik 					 ENA_IO_RXQ_IDX(ring->id));
1459617898d1SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n");
1460241da076SRafal Kozik 		return ENA_COM_FAULT;
14611173fca2SJan Medala 	}
14624387e81cSIdo Segev 	/* Flush the per-core Rx buffer pool caches, as the buffers can be
14634387e81cSIdo Segev 	 * used on other cores as well.
14644387e81cSIdo Segev 	 */
14654387e81cSIdo Segev 	rte_mempool_cache_flush(NULL, ring->mb_pool);
14661173fca2SJan Medala 
14671173fca2SJan Medala 	return 0;
14681173fca2SJan Medala }
14691173fca2SJan Medala 
14701173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev,
14711173fca2SJan Medala 			      uint16_t queue_idx,
14721173fca2SJan Medala 			      uint16_t nb_desc,
14734217cb0bSMichal Krawczyk 			      unsigned int socket_id,
147456b8b9b7SRafal Kozik 			      const struct rte_eth_txconf *tx_conf)
14751173fca2SJan Medala {
14761173fca2SJan Medala 	struct ena_ring *txq = NULL;
1477890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
14781173fca2SJan Medala 	unsigned int i;
1479005064e5SMichal Krawczyk 	uint16_t dyn_thresh;
14801173fca2SJan Medala 
14811173fca2SJan Medala 	txq = &adapter->tx_ring[queue_idx];
14821173fca2SJan Medala 
14831173fca2SJan Medala 	if (txq->configured) {
14846f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
1485617898d1SMichal Krawczyk 			"API violation. Queue[%d] is already configured\n",
14861173fca2SJan Medala 			queue_idx);
1487241da076SRafal Kozik 		return ENA_COM_FAULT;
14881173fca2SJan Medala 	}
14891173fca2SJan Medala 
14901daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
14916f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1492617898d1SMichal Krawczyk 			"Unsupported size of Tx queue: %d is not a power of 2.\n",
14931daff526SJakub Palider 			nb_desc);
14941daff526SJakub Palider 		return -EINVAL;
14951daff526SJakub Palider 	}
14961daff526SJakub Palider 
14975920d930SMichal Krawczyk 	if (nb_desc > adapter->max_tx_ring_size) {
14986f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1499617898d1SMichal Krawczyk 			"Unsupported size of Tx queue (max size: %d)\n",
15005920d930SMichal Krawczyk 			adapter->max_tx_ring_size);
15011173fca2SJan Medala 		return -EINVAL;
15021173fca2SJan Medala 	}
15031173fca2SJan Medala 
15041173fca2SJan Medala 	txq->port_id = dev->data->port_id;
15051173fca2SJan Medala 	txq->next_to_clean = 0;
15061173fca2SJan Medala 	txq->next_to_use = 0;
15071173fca2SJan Medala 	txq->ring_size = nb_desc;
1508c0006061SMichal Krawczyk 	txq->size_mask = nb_desc - 1;
15094217cb0bSMichal Krawczyk 	txq->numa_socket_id = socket_id;
15101d973d8fSIgor Chauskin 	txq->pkts_without_db = false;
1511f93e20e5SMichal Krawczyk 	txq->last_cleanup_ticks = 0;
15121173fca2SJan Medala 
151308180833SMichal Krawczyk 	txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
151408180833SMichal Krawczyk 		sizeof(struct ena_tx_buffer) * txq->ring_size,
151508180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
151608180833SMichal Krawczyk 		socket_id);
15171173fca2SJan Medala 	if (!txq->tx_buffer_info) {
1518617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1519617898d1SMichal Krawczyk 			"Failed to allocate memory for Tx buffer info\n");
1520df238f84SMichal Krawczyk 		return -ENOMEM;
15211173fca2SJan Medala 	}
15221173fca2SJan Medala 
152308180833SMichal Krawczyk 	txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
152408180833SMichal Krawczyk 		sizeof(uint16_t) * txq->ring_size,
152508180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
152608180833SMichal Krawczyk 		socket_id);
15271173fca2SJan Medala 	if (!txq->empty_tx_reqs) {
1528617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1529617898d1SMichal Krawczyk 			"Failed to allocate memory for empty Tx requests\n");
1530df238f84SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
1531df238f84SMichal Krawczyk 		return -ENOMEM;
15321173fca2SJan Medala 	}
1533241da076SRafal Kozik 
15342fca2a98SMichal Krawczyk 	txq->push_buf_intermediate_buf =
153508180833SMichal Krawczyk 		rte_zmalloc_socket("txq->push_buf_intermediate_buf",
15362fca2a98SMichal Krawczyk 			txq->tx_max_header_size,
153708180833SMichal Krawczyk 			RTE_CACHE_LINE_SIZE,
153808180833SMichal Krawczyk 			socket_id);
15392fca2a98SMichal Krawczyk 	if (!txq->push_buf_intermediate_buf) {
1540617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
15412fca2a98SMichal Krawczyk 		rte_free(txq->tx_buffer_info);
15422fca2a98SMichal Krawczyk 		rte_free(txq->empty_tx_reqs);
15432fca2a98SMichal Krawczyk 		return -ENOMEM;
15442fca2a98SMichal Krawczyk 	}
15452fca2a98SMichal Krawczyk 
15461173fca2SJan Medala 	for (i = 0; i < txq->ring_size; i++)
15471173fca2SJan Medala 		txq->empty_tx_reqs[i] = i;
15481173fca2SJan Medala 
1549005064e5SMichal Krawczyk 	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1550005064e5SMichal Krawczyk 
1551005064e5SMichal Krawczyk 	/* Check if caller provided the Tx cleanup threshold value. */
1552005064e5SMichal Krawczyk 	if (tx_conf->tx_free_thresh != 0) {
1553005064e5SMichal Krawczyk 		txq->tx_free_thresh = tx_conf->tx_free_thresh;
1554005064e5SMichal Krawczyk 	} else {
1555005064e5SMichal Krawczyk 		dyn_thresh = txq->ring_size -
1556005064e5SMichal Krawczyk 			txq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1557005064e5SMichal Krawczyk 		txq->tx_free_thresh = RTE_MAX(dyn_thresh,
1558005064e5SMichal Krawczyk 			txq->ring_size - ENA_REFILL_THRESH_PACKET);
15592081d5e2SMichal Krawczyk 	}
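	/*
	 * Worked example (illustrative, assuming ENA_REFILL_THRESH_DIVIDER
	 * is 8 and ENA_REFILL_THRESH_PACKET is 256): for a 1024-descriptor
	 * ring, dyn_thresh = 1024 - 1024 / 8 = 896, the floor is
	 * 1024 - 256 = 768, and RTE_MAX() keeps tx_free_thresh at 896.
	 */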
1560005064e5SMichal Krawczyk 
1561f93e20e5SMichal Krawczyk 	txq->missing_tx_completion_threshold =
1562f93e20e5SMichal Krawczyk 		RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);
1563f93e20e5SMichal Krawczyk 
15641173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
15651173fca2SJan Medala 	txq->configured = 1;
15661173fca2SJan Medala 	dev->data->tx_queues[queue_idx] = txq;
1567241da076SRafal Kozik 
1568241da076SRafal Kozik 	return 0;
15691173fca2SJan Medala }
15701173fca2SJan Medala 
15711173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev,
15721173fca2SJan Medala 			      uint16_t queue_idx,
15731173fca2SJan Medala 			      uint16_t nb_desc,
15744217cb0bSMichal Krawczyk 			      unsigned int socket_id,
157534d5e97eSMichal Krawczyk 			      const struct rte_eth_rxconf *rx_conf,
15761173fca2SJan Medala 			      struct rte_mempool *mp)
15771173fca2SJan Medala {
1578890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
15791173fca2SJan Medala 	struct ena_ring *rxq = NULL;
158038364c26SMichal Krawczyk 	size_t buffer_size;
1581df238f84SMichal Krawczyk 	int i;
1582005064e5SMichal Krawczyk 	uint16_t dyn_thresh;
15831173fca2SJan Medala 
15841173fca2SJan Medala 	rxq = &adapter->rx_ring[queue_idx];
15851173fca2SJan Medala 	if (rxq->configured) {
15866f1c9df9SStephen Hemminger 		PMD_DRV_LOG(CRIT,
1587617898d1SMichal Krawczyk 			"API violation. Queue[%d] is already configured\n",
15881173fca2SJan Medala 			queue_idx);
1589241da076SRafal Kozik 		return ENA_COM_FAULT;
15901173fca2SJan Medala 	}
15911173fca2SJan Medala 
15921daff526SJakub Palider 	if (!rte_is_power_of_2(nb_desc)) {
15936f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1594617898d1SMichal Krawczyk 			"Unsupported size of Rx queue: %d is not a power of 2.\n",
15951daff526SJakub Palider 			nb_desc);
15961daff526SJakub Palider 		return -EINVAL;
15971daff526SJakub Palider 	}
15981daff526SJakub Palider 
15995920d930SMichal Krawczyk 	if (nb_desc > adapter->max_rx_ring_size) {
16006f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1601617898d1SMichal Krawczyk 			"Unsupported size of Rx queue (max size: %d)\n",
16025920d930SMichal Krawczyk 			adapter->max_rx_ring_size);
16031173fca2SJan Medala 		return -EINVAL;
16041173fca2SJan Medala 	}
16051173fca2SJan Medala 
160738364c26SMichal Krawczyk 	/* ENA doesn't support buffers smaller than 1400 bytes */
160738364c26SMichal Krawczyk 	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
160838364c26SMichal Krawczyk 	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
160938364c26SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1610617898d1SMichal Krawczyk 			"Unsupported size of Rx buffer: %zu (min size: %d)\n",
161138364c26SMichal Krawczyk 			buffer_size, ENA_RX_BUF_MIN_SIZE);
161238364c26SMichal Krawczyk 		return -EINVAL;
161338364c26SMichal Krawczyk 	}
161438364c26SMichal Krawczyk 
16151173fca2SJan Medala 	rxq->port_id = dev->data->port_id;
16161173fca2SJan Medala 	rxq->next_to_clean = 0;
16171173fca2SJan Medala 	rxq->next_to_use = 0;
16181173fca2SJan Medala 	rxq->ring_size = nb_desc;
1619c0006061SMichal Krawczyk 	rxq->size_mask = nb_desc - 1;
16204217cb0bSMichal Krawczyk 	rxq->numa_socket_id = socket_id;
16211173fca2SJan Medala 	rxq->mb_pool = mp;
16221173fca2SJan Medala 
162308180833SMichal Krawczyk 	rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
16241be097dcSMichal Krawczyk 		sizeof(struct ena_rx_buffer) * nb_desc,
162508180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
162608180833SMichal Krawczyk 		socket_id);
16271173fca2SJan Medala 	if (!rxq->rx_buffer_info) {
1628617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1629617898d1SMichal Krawczyk 			"Failed to allocate memory for Rx buffer info\n");
16301173fca2SJan Medala 		return -ENOMEM;
16311173fca2SJan Medala 	}
16321173fca2SJan Medala 
163308180833SMichal Krawczyk 	rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
163479405ee1SRafal Kozik 		sizeof(struct rte_mbuf *) * nb_desc,
163508180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
163608180833SMichal Krawczyk 		socket_id);
163779405ee1SRafal Kozik 	if (!rxq->rx_refill_buffer) {
1638617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1639617898d1SMichal Krawczyk 			"Failed to allocate memory for Rx refill buffer\n");
164079405ee1SRafal Kozik 		rte_free(rxq->rx_buffer_info);
164179405ee1SRafal Kozik 		rxq->rx_buffer_info = NULL;
164279405ee1SRafal Kozik 		return -ENOMEM;
164379405ee1SRafal Kozik 	}
164479405ee1SRafal Kozik 
164508180833SMichal Krawczyk 	rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
1646c2034976SMichal Krawczyk 		sizeof(uint16_t) * nb_desc,
164708180833SMichal Krawczyk 		RTE_CACHE_LINE_SIZE,
164808180833SMichal Krawczyk 		socket_id);
1649c2034976SMichal Krawczyk 	if (!rxq->empty_rx_reqs) {
1650617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1651617898d1SMichal Krawczyk 			"Failed to allocate memory for empty Rx requests\n");
1652c2034976SMichal Krawczyk 		rte_free(rxq->rx_buffer_info);
1653c2034976SMichal Krawczyk 		rxq->rx_buffer_info = NULL;
165479405ee1SRafal Kozik 		rte_free(rxq->rx_refill_buffer);
165579405ee1SRafal Kozik 		rxq->rx_refill_buffer = NULL;
1656c2034976SMichal Krawczyk 		return -ENOMEM;
1657c2034976SMichal Krawczyk 	}
1658c2034976SMichal Krawczyk 
1659c2034976SMichal Krawczyk 	for (i = 0; i < nb_desc; i++)
1660eccbe2ffSRafal Kozik 		rxq->empty_rx_reqs[i] = i;
1661c2034976SMichal Krawczyk 
166234d5e97eSMichal Krawczyk 	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
166334d5e97eSMichal Krawczyk 
1664005064e5SMichal Krawczyk 	if (rx_conf->rx_free_thresh != 0) {
1665005064e5SMichal Krawczyk 		rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1666005064e5SMichal Krawczyk 	} else {
1667005064e5SMichal Krawczyk 		dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER;
1668005064e5SMichal Krawczyk 		rxq->rx_free_thresh = RTE_MIN(dyn_thresh,
1669005064e5SMichal Krawczyk 			(uint16_t)(ENA_REFILL_THRESH_PACKET));
1670005064e5SMichal Krawczyk 	}
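	/*
	 * Worked example (illustrative, same constants assumed as on the Tx
	 * path): for a 1024-descriptor ring, dyn_thresh = 1024 / 8 = 128 and
	 * RTE_MIN(128, 256) keeps rx_free_thresh at 128.
	 */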
1671005064e5SMichal Krawczyk 
16721173fca2SJan Medala 	/* Store pointer to this queue in upper layer */
16731173fca2SJan Medala 	rxq->configured = 1;
16741173fca2SJan Medala 	dev->data->rx_queues[queue_idx] = rxq;
16751173fca2SJan Medala 
1676df238f84SMichal Krawczyk 	return 0;
16771173fca2SJan Medala }
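
/*
 * Usage sketch (illustrative, not part of the driver): the two setup
 * handlers above are reached through the generic ethdev API, e.g.:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *		256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
 *
 * Both ring sizes must be powers of two, as enforced above, and the
 * mempool data room must cover ENA_RX_BUF_MIN_SIZE.
 */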
16781173fca2SJan Medala 
167983fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
168083fd97b2SMichal Krawczyk 				  struct rte_mbuf *mbuf, uint16_t id)
168183fd97b2SMichal Krawczyk {
168283fd97b2SMichal Krawczyk 	struct ena_com_buf ebuf;
168383fd97b2SMichal Krawczyk 	int rc;
168483fd97b2SMichal Krawczyk 
168583fd97b2SMichal Krawczyk 	/* prepare physical address for DMA transaction */
168683fd97b2SMichal Krawczyk 	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
168783fd97b2SMichal Krawczyk 	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
168883fd97b2SMichal Krawczyk 
168983fd97b2SMichal Krawczyk 	/* pass resource to device */
169083fd97b2SMichal Krawczyk 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
169183fd97b2SMichal Krawczyk 	if (unlikely(rc != 0))
16920a001d69SMichal Krawczyk 		PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");
169383fd97b2SMichal Krawczyk 
169483fd97b2SMichal Krawczyk 	return rc;
169583fd97b2SMichal Krawczyk }
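
/*
 * Illustrative arithmetic (assuming the default RTE_PKTMBUF_HEADROOM of
 * 128 bytes): with a 2176-byte mbuf buffer the descriptor handed to the
 * device covers the 2048 bytes that follow the headroom.
 */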
169683fd97b2SMichal Krawczyk 
16971173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
16981173fca2SJan Medala {
16991173fca2SJan Medala 	unsigned int i;
17001173fca2SJan Medala 	int rc;
17011daff526SJakub Palider 	uint16_t next_to_use = rxq->next_to_use;
17020a001d69SMichal Krawczyk 	uint16_t req_id;
17030a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
17040a001d69SMichal Krawczyk 	uint16_t in_use;
17050a001d69SMichal Krawczyk #endif
170679405ee1SRafal Kozik 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
17071173fca2SJan Medala 
17081173fca2SJan Medala 	if (unlikely(!count))
17091173fca2SJan Medala 		return 0;
17101173fca2SJan Medala 
17110a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
1712c0006061SMichal Krawczyk 	in_use = rxq->ring_size - 1 -
1713c0006061SMichal Krawczyk 		ena_com_free_q_entries(rxq->ena_com_io_sq);
17140a001d69SMichal Krawczyk 	if (unlikely((in_use + count) >= rxq->ring_size))
17150a001d69SMichal Krawczyk 		PMD_RX_LOG(ERR, "Bad Rx ring state\n");
17160a001d69SMichal Krawczyk #endif
17171173fca2SJan Medala 
17181173fca2SJan Medala 	/* get resources for incoming packets */
17193c8bc29fSDavid Harton 	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
17201173fca2SJan Medala 	if (unlikely(rc < 0)) {
17211173fca2SJan Medala 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
17227830e905SSolganik Alexander 		++rxq->rx_stats.mbuf_alloc_fail;
1723617898d1SMichal Krawczyk 		PMD_RX_LOG(DEBUG, "There are not enough free buffers\n");
17241173fca2SJan Medala 		return 0;
17251173fca2SJan Medala 	}
17261173fca2SJan Medala 
17271173fca2SJan Medala 	for (i = 0; i < count; i++) {
172879405ee1SRafal Kozik 		struct rte_mbuf *mbuf = mbufs[i];
17291be097dcSMichal Krawczyk 		struct ena_rx_buffer *rx_info;
17301173fca2SJan Medala 
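		/* Prefetch a few mbufs ahead to hide the cache-miss latency
		 * of the per-mbuf work below; the distance of 4 is a
		 * heuristic carried by the original code.
		 */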
173179405ee1SRafal Kozik 		if (likely((i + 4) < count))
173279405ee1SRafal Kozik 			rte_prefetch0(mbufs[i + 4]);
1733c2034976SMichal Krawczyk 
1734c0006061SMichal Krawczyk 		req_id = rxq->empty_rx_reqs[next_to_use];
17351be097dcSMichal Krawczyk 		rx_info = &rxq->rx_buffer_info[req_id];
1736241da076SRafal Kozik 
173783fd97b2SMichal Krawczyk 		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
173883fd97b2SMichal Krawczyk 		if (unlikely(rc != 0))
17391173fca2SJan Medala 			break;
174083fd97b2SMichal Krawczyk 
17411be097dcSMichal Krawczyk 		rx_info->mbuf = mbuf;
1742c0006061SMichal Krawczyk 		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
17431173fca2SJan Medala 	}
17441173fca2SJan Medala 
174579405ee1SRafal Kozik 	if (unlikely(i < count)) {
17460a001d69SMichal Krawczyk 		PMD_RX_LOG(WARNING,
1747617898d1SMichal Krawczyk 			"Refilled Rx queue[%d] with only %d/%d buffers\n",
1748617898d1SMichal Krawczyk 			rxq->id, i, count);
17493c8bc29fSDavid Harton 		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
17507830e905SSolganik Alexander 		++rxq->rx_stats.refill_partial;
175179405ee1SRafal Kozik 	}
1752241da076SRafal Kozik 
17537be78d02SJosh Soref 	/* Once we have submitted free resources to the device... */
17543d19e1abSRafal Kozik 	if (likely(i > 0)) {
175538faa87eSMichal Krawczyk 		/* ...let HW know that it can fill buffers with data. */
17561173fca2SJan Medala 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
17571173fca2SJan Medala 
17585e02e19eSJan Medala 		rxq->next_to_use = next_to_use;
17595e02e19eSJan Medala 	}
17605e02e19eSJan Medala 
17611173fca2SJan Medala 	return i;
17621173fca2SJan Medala }
17631173fca2SJan Medala 
176492401abfSShai Brandes static size_t ena_get_metrics_entries(struct ena_adapter *adapter)
176592401abfSShai Brandes {
176692401abfSShai Brandes 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
176792401abfSShai Brandes 	size_t metrics_num = 0;
176892401abfSShai Brandes 
176992401abfSShai Brandes 	if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS))
177092401abfSShai Brandes 		metrics_num = ENA_STATS_ARRAY_METRICS;
177192401abfSShai Brandes 	else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS))
177292401abfSShai Brandes 		metrics_num = ENA_STATS_ARRAY_METRICS_LEGACY;
177392401abfSShai Brandes 	PMD_DRV_LOG(NOTICE, "%u customer metrics are supported\n", (unsigned int)metrics_num);
177492401abfSShai Brandes 	if (metrics_num > ENA_MAX_CUSTOMER_METRICS) {
177592401abfSShai Brandes 		PMD_DRV_LOG(NOTICE, "Not enough space for the requested customer metrics\n");
177692401abfSShai Brandes 		metrics_num = ENA_MAX_CUSTOMER_METRICS;
177792401abfSShai Brandes 	}
177892401abfSShai Brandes 	return metrics_num;
177992401abfSShai Brandes }
178092401abfSShai Brandes 
1781b9b05d6fSMichal Krawczyk static int ena_device_init(struct ena_adapter *adapter,
1782aab58857SStanislaw Kardach 			   struct rte_pci_device *pdev,
1783b9b05d6fSMichal Krawczyk 			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
17841173fca2SJan Medala {
1785b9b05d6fSMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
1786ca148440SMichal Krawczyk 	uint32_t aenq_groups;
17871173fca2SJan Medala 	int rc;
1788c4144557SJan Medala 	bool readless_supported;
17891173fca2SJan Medala 
17901173fca2SJan Medala 	/* Initialize mmio registers */
17911173fca2SJan Medala 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
17921173fca2SJan Medala 	if (rc) {
1793617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n");
17941173fca2SJan Medala 		return rc;
17951173fca2SJan Medala 	}
17961173fca2SJan Medala 
1797c4144557SJan Medala 	/* The PCIe configuration space revision ID indicates whether mmio
1798c4144557SJan Medala 	 * reg read is disabled.
1799c4144557SJan Medala 	 */
1800aab58857SStanislaw Kardach 	readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ);
1801c4144557SJan Medala 	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1802c4144557SJan Medala 
18031173fca2SJan Medala 	/* reset device */
18043adcba9aSMichal Krawczyk 	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
18051173fca2SJan Medala 	if (rc) {
1806617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Cannot reset device\n");
18071173fca2SJan Medala 		goto err_mmio_read_less;
18081173fca2SJan Medala 	}
18091173fca2SJan Medala 
18101173fca2SJan Medala 	/* check FW version */
18111173fca2SJan Medala 	rc = ena_com_validate_version(ena_dev);
18121173fca2SJan Medala 	if (rc) {
1813617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Device version is too low\n");
18141173fca2SJan Medala 		goto err_mmio_read_less;
18151173fca2SJan Medala 	}
18161173fca2SJan Medala 
18171173fca2SJan Medala 	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
18181173fca2SJan Medala 
18191173fca2SJan Medala 	/* ENA device administration layer init */
1820b68309beSRafal Kozik 	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
18211173fca2SJan Medala 	if (rc) {
18226f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1823617898d1SMichal Krawczyk 			"Cannot initialize ENA admin queue\n");
18241173fca2SJan Medala 		goto err_mmio_read_less;
18251173fca2SJan Medala 	}
18261173fca2SJan Medala 
18271173fca2SJan Medala 	/* To enable the MSI-X interrupts, the driver needs to know the
18281173fca2SJan Medala 	 * number of queues, so it uses polling mode to retrieve this
18291173fca2SJan Medala 	 * information.
18301173fca2SJan Medala 	 */
18311173fca2SJan Medala 	ena_com_set_admin_polling_mode(ena_dev, true);
18321173fca2SJan Medala 
1833201ff2e5SJakub Palider 	ena_config_host_info(ena_dev);
1834201ff2e5SJakub Palider 
18351173fca2SJan Medala 	/* Get Device Attributes and features */
18361173fca2SJan Medala 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
18371173fca2SJan Medala 	if (rc) {
18386f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR,
1839617898d1SMichal Krawczyk 			"Cannot get attribute for ENA device, rc: %d\n", rc);
18401173fca2SJan Medala 		goto err_admin_init;
18411173fca2SJan Medala 	}
18421173fca2SJan Medala 
1843f01f060cSRafal Kozik 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1844d9b8b106SMichal Krawczyk 		      BIT(ENA_ADMIN_NOTIFICATION) |
1845983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_KEEP_ALIVE) |
1846983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_FATAL_ERROR) |
1847983cce2dSRafal Kozik 		      BIT(ENA_ADMIN_WARNING);
1848ca148440SMichal Krawczyk 
1849ca148440SMichal Krawczyk 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
1850ca148440SMichal Krawczyk 
1851b9b05d6fSMichal Krawczyk 	adapter->all_aenq_groups = aenq_groups;
185292401abfSShai Brandes 	/* The actual supported number of metrics is negotiated with the device at runtime */
185392401abfSShai Brandes 	adapter->metrics_num = ena_get_metrics_entries(adapter);
1854e859d2b8SRafal Kozik 
18551173fca2SJan Medala 	return 0;
18561173fca2SJan Medala 
18571173fca2SJan Medala err_admin_init:
18581173fca2SJan Medala 	ena_com_admin_destroy(ena_dev);
18591173fca2SJan Medala 
18601173fca2SJan Medala err_mmio_read_less:
18611173fca2SJan Medala 	ena_com_mmio_reg_read_request_destroy(ena_dev);
18621173fca2SJan Medala 
18631173fca2SJan Medala 	return rc;
18641173fca2SJan Medala }
18651173fca2SJan Medala 
1866ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg)
186715773e06SMichal Krawczyk {
1868aab58857SStanislaw Kardach 	struct rte_eth_dev *dev = cb_arg;
1869aab58857SStanislaw Kardach 	struct ena_adapter *adapter = dev->data->dev_private;
187015773e06SMichal Krawczyk 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
187115773e06SMichal Krawczyk 
187215773e06SMichal Krawczyk 	ena_com_admin_q_comp_intr_handler(ena_dev);
18733d19e1abSRafal Kozik 	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1874aab58857SStanislaw Kardach 		ena_com_aenq_intr_handler(ena_dev, dev);
187515773e06SMichal Krawczyk }
187615773e06SMichal Krawczyk 
18775efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter)
18785efb9fc7SMichal Krawczyk {
1879b9b05d6fSMichal Krawczyk 	if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)))
1880e859d2b8SRafal Kozik 		return;
1881e859d2b8SRafal Kozik 
18825efb9fc7SMichal Krawczyk 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
18835efb9fc7SMichal Krawczyk 		return;
18845efb9fc7SMichal Krawczyk 
18855efb9fc7SMichal Krawczyk 	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
18865efb9fc7SMichal Krawczyk 	    adapter->keep_alive_timeout)) {
18876f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
18882bae75eaSDawid Gorecki 		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
18897830e905SSolganik Alexander 		++adapter->dev_stats.wd_expired;
18905efb9fc7SMichal Krawczyk 	}
18915efb9fc7SMichal Krawczyk }
18925efb9fc7SMichal Krawczyk 
18935efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */
18945efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter)
18955efb9fc7SMichal Krawczyk {
18965efb9fc7SMichal Krawczyk 	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
1897617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n");
18982bae75eaSDawid Gorecki 		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
18995efb9fc7SMichal Krawczyk 	}
19005efb9fc7SMichal Krawczyk }
19015efb9fc7SMichal Krawczyk 
1902f93e20e5SMichal Krawczyk static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
1903f93e20e5SMichal Krawczyk 					    struct ena_ring *tx_ring)
1904f93e20e5SMichal Krawczyk {
1905f93e20e5SMichal Krawczyk 	struct ena_tx_buffer *tx_buf;
1906f93e20e5SMichal Krawczyk 	uint64_t timestamp;
1907f93e20e5SMichal Krawczyk 	uint64_t completion_delay;
1908f93e20e5SMichal Krawczyk 	uint32_t missed_tx = 0;
1909f93e20e5SMichal Krawczyk 	unsigned int i;
1910f93e20e5SMichal Krawczyk 	int rc = 0;
1911f93e20e5SMichal Krawczyk 
1912f93e20e5SMichal Krawczyk 	for (i = 0; i < tx_ring->ring_size; ++i) {
1913f93e20e5SMichal Krawczyk 		tx_buf = &tx_ring->tx_buffer_info[i];
1914f93e20e5SMichal Krawczyk 		timestamp = tx_buf->timestamp;
1915f93e20e5SMichal Krawczyk 
1916f93e20e5SMichal Krawczyk 		if (timestamp == 0)
1917f93e20e5SMichal Krawczyk 			continue;
1918f93e20e5SMichal Krawczyk 
1919f93e20e5SMichal Krawczyk 		completion_delay = rte_get_timer_cycles() - timestamp;
1920f93e20e5SMichal Krawczyk 		if (completion_delay > adapter->missing_tx_completion_to) {
1921f93e20e5SMichal Krawczyk 			if (unlikely(!tx_buf->print_once)) {
1922f93e20e5SMichal Krawczyk 				PMD_TX_LOG(WARNING,
1923f93e20e5SMichal Krawczyk 					"Found a Tx that wasn't completed on time, qid %d, index %d. "
1924f93e20e5SMichal Krawczyk 					"Missing Tx outstanding for %" PRIu64 " msecs.\n",
1925f93e20e5SMichal Krawczyk 					tx_ring->id, i,	completion_delay /
1926f93e20e5SMichal Krawczyk 					rte_get_timer_hz() * 1000);
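				/* Note (descriptive): the delta is divided
				 * by the timer frequency before the scaling
				 * by 1000, so the logged value is rounded
				 * down to whole seconds.
				 */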
1927f93e20e5SMichal Krawczyk 				tx_buf->print_once = true;
1928f93e20e5SMichal Krawczyk 			}
1929f93e20e5SMichal Krawczyk 			++missed_tx;
1930f93e20e5SMichal Krawczyk 		}
1931f93e20e5SMichal Krawczyk 	}
1932f93e20e5SMichal Krawczyk 
1933f93e20e5SMichal Krawczyk 	if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
1934f93e20e5SMichal Krawczyk 		PMD_DRV_LOG(ERR,
1935f93e20e5SMichal Krawczyk 			"The number of lost Tx completions is above the threshold (%d > %d). "
1936f93e20e5SMichal Krawczyk 			"Trigger the device reset.\n",
1937f93e20e5SMichal Krawczyk 			missed_tx,
1938f93e20e5SMichal Krawczyk 			tx_ring->missing_tx_completion_threshold);
1939f93e20e5SMichal Krawczyk 		adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
1940f93e20e5SMichal Krawczyk 		adapter->trigger_reset = true;
1941f93e20e5SMichal Krawczyk 		rc = -EIO;
1942f93e20e5SMichal Krawczyk 	}
1943f93e20e5SMichal Krawczyk 
1944f93e20e5SMichal Krawczyk 	tx_ring->tx_stats.missed_tx += missed_tx;
1945f93e20e5SMichal Krawczyk 
1946f93e20e5SMichal Krawczyk 	return rc;
1947f93e20e5SMichal Krawczyk }
1948f93e20e5SMichal Krawczyk 
1949f93e20e5SMichal Krawczyk static void check_for_tx_completions(struct ena_adapter *adapter)
1950f93e20e5SMichal Krawczyk {
1951f93e20e5SMichal Krawczyk 	struct ena_ring *tx_ring;
1952f93e20e5SMichal Krawczyk 	uint64_t tx_cleanup_delay;
1953f93e20e5SMichal Krawczyk 	size_t qid;
1954f93e20e5SMichal Krawczyk 	int budget;
1955f93e20e5SMichal Krawczyk 	uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues;
1956f93e20e5SMichal Krawczyk 
1957f93e20e5SMichal Krawczyk 	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
1958f93e20e5SMichal Krawczyk 		return;
1959f93e20e5SMichal Krawczyk 
1960f93e20e5SMichal Krawczyk 	nb_tx_queues = adapter->edev_data->nb_tx_queues;
1961f93e20e5SMichal Krawczyk 	budget = adapter->missing_tx_completion_budget;
1962f93e20e5SMichal Krawczyk 
1963f93e20e5SMichal Krawczyk 	qid = adapter->last_tx_comp_qid;
1964f93e20e5SMichal Krawczyk 	while (budget-- > 0) {
1965f93e20e5SMichal Krawczyk 		tx_ring = &adapter->tx_ring[qid];
1966f93e20e5SMichal Krawczyk 
1967f93e20e5SMichal Krawczyk 		/* Tx cleanup is called only by the burst function, so the
1968f93e20e5SMichal Krawczyk 		 * application invokes it at arbitrary times, and it is also
1969f93e20e5SMichal Krawczyk 		 * limited by the threshold. To avoid falsely detecting a
1970f93e20e5SMichal Krawczyk 		 * missing HW Tx completion, measure the delay since the
1971f93e20e5SMichal Krawczyk 		 * cleanup function was last called.
1972f93e20e5SMichal Krawczyk 		 */
1973f93e20e5SMichal Krawczyk 		tx_cleanup_delay = rte_get_timer_cycles() -
1974f93e20e5SMichal Krawczyk 			tx_ring->last_cleanup_ticks;
1975f93e20e5SMichal Krawczyk 		if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay)
1976f93e20e5SMichal Krawczyk 			check_for_tx_completion_in_queue(adapter, tx_ring);
1977f93e20e5SMichal Krawczyk 		qid = (qid + 1) % nb_tx_queues;
1978f93e20e5SMichal Krawczyk 	}
1979f93e20e5SMichal Krawczyk 
1980f93e20e5SMichal Krawczyk 	adapter->last_tx_comp_qid = qid;
1981f93e20e5SMichal Krawczyk }
1982f93e20e5SMichal Krawczyk 
1983d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1984d9b8b106SMichal Krawczyk 				  void *arg)
1985d9b8b106SMichal Krawczyk {
1986aab58857SStanislaw Kardach 	struct rte_eth_dev *dev = arg;
1987aab58857SStanislaw Kardach 	struct ena_adapter *adapter = dev->data->dev_private;
1988d9b8b106SMichal Krawczyk 
1989e2174a54SMichal Krawczyk 	if (unlikely(adapter->trigger_reset))
1990e2174a54SMichal Krawczyk 		return;
1991e2174a54SMichal Krawczyk 
19925efb9fc7SMichal Krawczyk 	check_for_missing_keep_alive(adapter);
19935efb9fc7SMichal Krawczyk 	check_for_admin_com_state(adapter);
1994f93e20e5SMichal Krawczyk 	check_for_tx_completions(adapter);
1995d9b8b106SMichal Krawczyk 
19965efb9fc7SMichal Krawczyk 	if (unlikely(adapter->trigger_reset)) {
19976f1c9df9SStephen Hemminger 		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
19985723fbedSFerruh Yigit 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1999d9b8b106SMichal Krawczyk 			NULL);
2000d9b8b106SMichal Krawczyk 	}
2001d9b8b106SMichal Krawczyk }
2002d9b8b106SMichal Krawczyk 
20032fca2a98SMichal Krawczyk static inline void
20048a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config,
20058a7a73f2SMichal Krawczyk 			       struct ena_admin_feature_llq_desc *llq,
20068a7a73f2SMichal Krawczyk 			       bool use_large_llq_hdr)
20072fca2a98SMichal Krawczyk {
20082fca2a98SMichal Krawczyk 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
20092fca2a98SMichal Krawczyk 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
20102fca2a98SMichal Krawczyk 	llq_config->llq_num_decs_before_header =
20112fca2a98SMichal Krawczyk 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
20128a7a73f2SMichal Krawczyk 
20138a7a73f2SMichal Krawczyk 	if (use_large_llq_hdr &&
20148a7a73f2SMichal Krawczyk 	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
20158a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
20168a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
20178a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 256;
20188a7a73f2SMichal Krawczyk 	} else {
20198a7a73f2SMichal Krawczyk 		llq_config->llq_ring_entry_size =
20208a7a73f2SMichal Krawczyk 			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
20212fca2a98SMichal Krawczyk 		llq_config->llq_ring_entry_size_value = 128;
20222fca2a98SMichal Krawczyk 	}
20238a7a73f2SMichal Krawczyk }
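
/*
 * Descriptive note: with large LLQ headers the Tx ring entries grow from
 * 128 B to 256 B, leaving more room for inlined packet headers at the
 * cost of halving the maximum Tx queue size (see ena_calc_io_queue_size()
 * above).
 */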
20242fca2a98SMichal Krawczyk 
20252fca2a98SMichal Krawczyk static int
20262fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter,
20272fca2a98SMichal Krawczyk 				struct ena_com_dev *ena_dev,
20282fca2a98SMichal Krawczyk 				struct ena_admin_feature_llq_desc *llq,
20292fca2a98SMichal Krawczyk 				struct ena_llq_configurations *llq_default_configurations)
20302fca2a98SMichal Krawczyk {
20312fca2a98SMichal Krawczyk 	int rc;
20322fca2a98SMichal Krawczyk 	u32 llq_feature_mask;
20332fca2a98SMichal Krawczyk 
20349944919eSMichal Krawczyk 	if (!adapter->enable_llq) {
20359944919eSMichal Krawczyk 		PMD_DRV_LOG(WARNING,
20369944919eSMichal Krawczyk 			"NOTE: LLQ has been disabled as per user's request. "
20379944919eSMichal Krawczyk 		"This may lead to significant performance degradation!\n");
20389944919eSMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
20399944919eSMichal Krawczyk 		return 0;
20409944919eSMichal Krawczyk 	}
20419944919eSMichal Krawczyk 
20422fca2a98SMichal Krawczyk 	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
20432fca2a98SMichal Krawczyk 	if (!(ena_dev->supported_features & llq_feature_mask)) {
20446f1c9df9SStephen Hemminger 		PMD_DRV_LOG(INFO,
20452fca2a98SMichal Krawczyk 			"LLQ is not supported. Falling back to host mode policy.\n");
20462fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
20472fca2a98SMichal Krawczyk 		return 0;
20482fca2a98SMichal Krawczyk 	}
20492fca2a98SMichal Krawczyk 
20509ae7a13fSDawid Gorecki 	if (adapter->dev_mem_base == NULL) {
20519ae7a13fSDawid Gorecki 		PMD_DRV_LOG(ERR,
20529ae7a13fSDawid Gorecki 			"LLQ is advertised as supported, but device doesn't expose mem bar\n");
20539ae7a13fSDawid Gorecki 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
20549ae7a13fSDawid Gorecki 		return 0;
20559ae7a13fSDawid Gorecki 	}
20569ae7a13fSDawid Gorecki 
20572fca2a98SMichal Krawczyk 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
20582fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
2059617898d1SMichal Krawczyk 		PMD_INIT_LOG(WARNING,
2060617898d1SMichal Krawczyk 			"Failed to configure dev mode. Falling back to host mode policy.\n");
20612fca2a98SMichal Krawczyk 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
20622fca2a98SMichal Krawczyk 		return 0;
20632fca2a98SMichal Krawczyk 	}
20642fca2a98SMichal Krawczyk 
20652fca2a98SMichal Krawczyk 	/* Nothing to config, exit */
20662fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
20672fca2a98SMichal Krawczyk 		return 0;
20682fca2a98SMichal Krawczyk 
20692fca2a98SMichal Krawczyk 	ena_dev->mem_bar = adapter->dev_mem_base;
20702fca2a98SMichal Krawczyk 
20712fca2a98SMichal Krawczyk 	return 0;
20722fca2a98SMichal Krawczyk }
20732fca2a98SMichal Krawczyk 
20745920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
207501bd6877SRafal Kozik 	struct ena_com_dev_get_features_ctx *get_feat_ctx)
207601bd6877SRafal Kozik {
20775920d930SMichal Krawczyk 	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
207801bd6877SRafal Kozik 
2079ea93d37eSRafal Kozik 	/* Regular queues capabilities */
2080ea93d37eSRafal Kozik 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2081ea93d37eSRafal Kozik 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2082ea93d37eSRafal Kozik 			&get_feat_ctx->max_queue_ext.max_queue_ext;
20832fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
20842fca2a98SMichal Krawczyk 				    max_queue_ext->max_rx_cq_num);
20852fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
20862fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2087ea93d37eSRafal Kozik 	} else {
2088ea93d37eSRafal Kozik 		struct ena_admin_queue_feature_desc *max_queues =
2089ea93d37eSRafal Kozik 			&get_feat_ctx->max_queues;
20902fca2a98SMichal Krawczyk 		io_tx_sq_num = max_queues->max_sq_num;
20912fca2a98SMichal Krawczyk 		io_tx_cq_num = max_queues->max_cq_num;
20922fca2a98SMichal Krawczyk 		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
2093ea93d37eSRafal Kozik 	}
209401bd6877SRafal Kozik 
20952fca2a98SMichal Krawczyk 	/* In case of LLQ, use the LLQ number from the get feature command */
20962fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
20972fca2a98SMichal Krawczyk 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
20982fca2a98SMichal Krawczyk 
20995920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
21005920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
21015920d930SMichal Krawczyk 	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
210201bd6877SRafal Kozik 
21035920d930SMichal Krawczyk 	if (unlikely(max_num_io_queues == 0)) {
2104617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n");
210501bd6877SRafal Kozik 		return -EFAULT;
210601bd6877SRafal Kozik 	}
210701bd6877SRafal Kozik 
21085920d930SMichal Krawczyk 	return max_num_io_queues;
210901bd6877SRafal Kozik }
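/*
 * Worked example with hypothetical device limits: max_rx_sq_num = 32,
 * max_rx_cq_num = 16, max_tx_sq_num = 8 and max_tx_cq_num = 16 give
 * io_rx_num = min(32, 16) = 16, and then
 * max_num_io_queues = min(ENA_MAX_NUM_IO_QUEUES, 16, 8, 16) = 8
 * (assuming ENA_MAX_NUM_IO_QUEUES >= 8), i.e. the most constrained resource
 * bounds the number of usable IO queue pairs.
 */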
211001bd6877SRafal Kozik 
2111e8c838fdSMichal Krawczyk static void
2112e8c838fdSMichal Krawczyk ena_set_offloads(struct ena_offloads *offloads,
2113e8c838fdSMichal Krawczyk 		 struct ena_admin_feature_offload_desc *offload_desc)
2114e8c838fdSMichal Krawczyk {
2115e8c838fdSMichal Krawczyk 	if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
2116e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_IPV4_TSO;
2117e8c838fdSMichal Krawczyk 
2118e8c838fdSMichal Krawczyk 	/* Tx IPv4 checksum offloads */
2119e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2120e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
2121e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
2122e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2123e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
2124e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
2125e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2126e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
2127e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;
2128e8c838fdSMichal Krawczyk 
2129e8c838fdSMichal Krawczyk 	/* Tx IPv6 checksum offloads */
2130e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2131e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
2132e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
2133e8c838fdSMichal Krawczyk 	if (offload_desc->tx &
2134e8c838fdSMichal Krawczyk 	     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
2135e8c838fdSMichal Krawczyk 		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;
2136e8c838fdSMichal Krawczyk 
2137e8c838fdSMichal Krawczyk 	/* Rx IPv4 checksum offloads */
2138e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2139e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
2140e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
2141e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2142e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
2143e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;
2144e8c838fdSMichal Krawczyk 
2145e8c838fdSMichal Krawczyk 	/* Rx IPv6 checksum offloads */
2146e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2147e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
2148e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;
2149e8c838fdSMichal Krawczyk 
2150e8c838fdSMichal Krawczyk 	if (offload_desc->rx_supported &
2151e8c838fdSMichal Krawczyk 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
2152e8c838fdSMichal Krawczyk 		offloads->rx_offloads |= ENA_RX_RSS_HASH;
2153e8c838fdSMichal Krawczyk }
2154e8c838fdSMichal Krawczyk 
2155e3595539SStanislaw Kardach static int ena_init_once(void)
2156e3595539SStanislaw Kardach {
2157e3595539SStanislaw Kardach 	static bool init_done;
2158e3595539SStanislaw Kardach 
2159e3595539SStanislaw Kardach 	if (init_done)
2160e3595539SStanislaw Kardach 		return 0;
2161e3595539SStanislaw Kardach 
2162e3595539SStanislaw Kardach 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2163e3595539SStanislaw Kardach 		/* Init timer subsystem for the ENA timer service. */
2164e3595539SStanislaw Kardach 		rte_timer_subsystem_init();
2165e3595539SStanislaw Kardach 		/* Register handler for requests from secondary processes. */
2166e3595539SStanislaw Kardach 		rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle);
2167e3595539SStanislaw Kardach 	}
2168e3595539SStanislaw Kardach 
2169e3595539SStanislaw Kardach 	init_done = true;
2170e3595539SStanislaw Kardach 	return 0;
2171e3595539SStanislaw Kardach }
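/*
 * Note (informational): init_done is per-process static state, so the timer
 * subsystem and the ENA_MP_NAME action handler are set up at most once per
 * process, and only the primary process registers the handler. Secondary
 * processes are expected to use the same rte_mp channel to forward requests
 * that only the primary can service (see ena_mp_primary_handle).
 */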
2172e3595539SStanislaw Kardach 
21731173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
21741173fca2SJan Medala {
2175ea93d37eSRafal Kozik 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
21761173fca2SJan Medala 	struct rte_pci_device *pci_dev;
2177eb0ef49dSMichal Krawczyk 	struct rte_intr_handle *intr_handle;
2178890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
21791173fca2SJan Medala 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
21801173fca2SJan Medala 	struct ena_com_dev_get_features_ctx get_feat_ctx;
21812fca2a98SMichal Krawczyk 	struct ena_llq_configurations llq_config;
21822fca2a98SMichal Krawczyk 	const char *queue_type_str;
21835920d930SMichal Krawczyk 	uint32_t max_num_io_queues;
2184ea93d37eSRafal Kozik 	int rc;
21851173fca2SJan Medala 	static int adapters_found;
218633dde075SMichal Krawczyk 	bool disable_meta_caching;
21871173fca2SJan Medala 
21881173fca2SJan Medala 	eth_dev->dev_ops = &ena_dev_ops;
21891173fca2SJan Medala 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
21901173fca2SJan Medala 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
2191b3fc5a1aSKonstantin Ananyev 	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
21921173fca2SJan Medala 
2193e3595539SStanislaw Kardach 	rc = ena_init_once();
2194e3595539SStanislaw Kardach 	if (rc != 0)
2195e3595539SStanislaw Kardach 		return rc;
2196e3595539SStanislaw Kardach 
21971173fca2SJan Medala 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
21981173fca2SJan Medala 		return 0;
21991173fca2SJan Medala 
2200f30e69b4SFerruh Yigit 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2201f30e69b4SFerruh Yigit 
2202fd976890SMichal Krawczyk 	memset(adapter, 0, sizeof(struct ena_adapter));
2203fd976890SMichal Krawczyk 	ena_dev = &adapter->ena_dev;
2204fd976890SMichal Krawczyk 
2205aab58857SStanislaw Kardach 	adapter->edev_data = eth_dev->data;
2206fd976890SMichal Krawczyk 
2207c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
22081173fca2SJan Medala 
22092fc03b23SThomas Monjalon 	PMD_INIT_LOG(INFO, "Initializing " PCI_PRI_FMT "\n",
22101173fca2SJan Medala 		     pci_dev->addr.domain,
22111173fca2SJan Medala 		     pci_dev->addr.bus,
22121173fca2SJan Medala 		     pci_dev->addr.devid,
22131173fca2SJan Medala 		     pci_dev->addr.function);
22141173fca2SJan Medala 
2215d61138d4SHarman Kalra 	intr_handle = pci_dev->intr_handle;
2216eb0ef49dSMichal Krawczyk 
22171173fca2SJan Medala 	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
22181173fca2SJan Medala 	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
22191173fca2SJan Medala 
22201d339597SRafal Kozik 	if (!adapter->regs) {
2221617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
22221173fca2SJan Medala 			     ENA_REGS_BAR);
22231d339597SRafal Kozik 		return -ENXIO;
22241d339597SRafal Kozik 	}
22251173fca2SJan Medala 
22261173fca2SJan Medala 	ena_dev->reg_bar = adapter->regs;
2227850e1bb1SMichal Krawczyk 	/* Pass device data as a pointer that can be used by the IO functions
2228850e1bb1SMichal Krawczyk 	 * in ena_com (for example, for memory allocation).
2229850e1bb1SMichal Krawczyk 	 */
2230850e1bb1SMichal Krawczyk 	ena_dev->dmadev = eth_dev->data;
22311173fca2SJan Medala 
22321173fca2SJan Medala 	adapter->id_number = adapters_found;
22331173fca2SJan Medala 
22341173fca2SJan Medala 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
22351173fca2SJan Medala 		 adapter->id_number);
22361173fca2SJan Medala 
22379944919eSMichal Krawczyk 	/* Assign default devargs values */
2238cc0c5d25SMichal Krawczyk 	adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
22399944919eSMichal Krawczyk 	adapter->enable_llq = true;
22409944919eSMichal Krawczyk 	adapter->use_large_llq_hdr = false;
2241cc0c5d25SMichal Krawczyk 
22428a7a73f2SMichal Krawczyk 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
22438a7a73f2SMichal Krawczyk 	if (rc != 0) {
22448a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
22458a7a73f2SMichal Krawczyk 		goto err;
22468a7a73f2SMichal Krawczyk 	}
224792401abfSShai Brandes 	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
224892401abfSShai Brandes 	if (rc != 0) {
224992401abfSShai Brandes 		PMD_INIT_LOG(CRIT, "Failed to allocate customer metrics buffer\n");
225092401abfSShai Brandes 		goto err;
225192401abfSShai Brandes 	}
22528a7a73f2SMichal Krawczyk 
22531173fca2SJan Medala 	/* device specific initialization routine */
2254b9b05d6fSMichal Krawczyk 	rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
22551173fca2SJan Medala 	if (rc) {
2256617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
225792401abfSShai Brandes 		goto err_metrics_delete;
22581173fca2SJan Medala 	}
2259b9b05d6fSMichal Krawczyk 
2260b9b05d6fSMichal Krawczyk 	/* Check if device supports LSC */
2261b9b05d6fSMichal Krawczyk 	if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
2262b9b05d6fSMichal Krawczyk 		adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
22631173fca2SJan Medala 
22648a7a73f2SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
22658a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
22662fca2a98SMichal Krawczyk 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
22672fca2a98SMichal Krawczyk 					     &get_feat_ctx.llq, &llq_config);
22682fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
2269617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
22702fca2a98SMichal Krawczyk 		return rc;
22712fca2a98SMichal Krawczyk 	}
22722fca2a98SMichal Krawczyk 
22732fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
22742fca2a98SMichal Krawczyk 		queue_type_str = "Regular";
22752fca2a98SMichal Krawczyk 	else
22762fca2a98SMichal Krawczyk 		queue_type_str = "Low latency";
22776f1c9df9SStephen Hemminger 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
2278ea93d37eSRafal Kozik 
2279ea93d37eSRafal Kozik 	calc_queue_ctx.ena_dev = ena_dev;
2280ea93d37eSRafal Kozik 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
22811173fca2SJan Medala 
22825920d930SMichal Krawczyk 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
22838a7a73f2SMichal Krawczyk 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
22848a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
22855920d930SMichal Krawczyk 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
2286241da076SRafal Kozik 		rc = -EFAULT;
2287241da076SRafal Kozik 		goto err_device_destroy;
2288241da076SRafal Kozik 	}
22891173fca2SJan Medala 
22905920d930SMichal Krawczyk 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
22915920d930SMichal Krawczyk 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
2292ea93d37eSRafal Kozik 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
2293ea93d37eSRafal Kozik 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
22945920d930SMichal Krawczyk 	adapter->max_num_io_queues = max_num_io_queues;
22952061fe41SRafal Kozik 
229633dde075SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
229733dde075SMichal Krawczyk 		disable_meta_caching =
229833dde075SMichal Krawczyk 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
229933dde075SMichal Krawczyk 			BIT(ENA_ADMIN_DISABLE_META_CACHING));
230033dde075SMichal Krawczyk 	} else {
230133dde075SMichal Krawczyk 		disable_meta_caching = false;
230233dde075SMichal Krawczyk 	}
230333dde075SMichal Krawczyk 
23041173fca2SJan Medala 	/* prepare ring structures */
230533dde075SMichal Krawczyk 	ena_init_rings(adapter, disable_meta_caching);
23061173fca2SJan Medala 
2307372c1af5SJan Medala 	ena_config_debug_area(adapter);
2308372c1af5SJan Medala 
23091173fca2SJan Medala 	/* Set max MTU for this device */
23101173fca2SJan Medala 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
23111173fca2SJan Medala 
2312e8c838fdSMichal Krawczyk 	ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload);
231383277a7cSJakub Palider 
23141173fca2SJan Medala 	/* Copy MAC address and point DPDK to it */
23156d13ea8eSOlivier Matz 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
2316538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)
2317538da7a1SOlivier Matz 			get_feat_ctx.dev_attr.mac_addr,
23186d13ea8eSOlivier Matz 			(struct rte_ether_addr *)adapter->mac_addr);
23191173fca2SJan Medala 
232034d5e97eSMichal Krawczyk 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
232134d5e97eSMichal Krawczyk 	if (unlikely(rc != 0)) {
232234d5e97eSMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
232334d5e97eSMichal Krawczyk 		goto err_delete_debug_area;
232434d5e97eSMichal Krawczyk 	}
232534d5e97eSMichal Krawczyk 
23261173fca2SJan Medala 	adapter->drv_stats = rte_zmalloc("adapter stats",
23271173fca2SJan Medala 					 sizeof(*adapter->drv_stats),
23281173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
23291173fca2SJan Medala 	if (!adapter->drv_stats) {
2330617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
2331617898d1SMichal Krawczyk 			"Failed to allocate memory for adapter statistics\n");
2332241da076SRafal Kozik 		rc = -ENOMEM;
233334d5e97eSMichal Krawczyk 		goto err_rss_destroy;
23341173fca2SJan Medala 	}
23351173fca2SJan Medala 
23361343c415SMichal Krawczyk 	rte_spinlock_init(&adapter->admin_lock);
23371343c415SMichal Krawczyk 
2338eb0ef49dSMichal Krawczyk 	rte_intr_callback_register(intr_handle,
2339eb0ef49dSMichal Krawczyk 				   ena_interrupt_handler_rte,
2340aab58857SStanislaw Kardach 				   eth_dev);
2341eb0ef49dSMichal Krawczyk 	rte_intr_enable(intr_handle);
2342eb0ef49dSMichal Krawczyk 	ena_com_set_admin_polling_mode(ena_dev, false);
2343ca148440SMichal Krawczyk 	ena_com_admin_aenq_enable(ena_dev);
2344eb0ef49dSMichal Krawczyk 
2345d9b8b106SMichal Krawczyk 	rte_timer_init(&adapter->timer_wd);
2346d9b8b106SMichal Krawczyk 
23471173fca2SJan Medala 	adapters_found++;
23481173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_INIT;
23491173fca2SJan Medala 
23501173fca2SJan Medala 	return 0;
2351241da076SRafal Kozik 
235234d5e97eSMichal Krawczyk err_rss_destroy:
235334d5e97eSMichal Krawczyk 	ena_com_rss_destroy(ena_dev);
2354241da076SRafal Kozik err_delete_debug_area:
2355241da076SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
2356241da076SRafal Kozik 
2357241da076SRafal Kozik err_device_destroy:
2358241da076SRafal Kozik 	ena_com_delete_host_info(ena_dev);
2359241da076SRafal Kozik 	ena_com_admin_destroy(ena_dev);
236092401abfSShai Brandes err_metrics_delete:
236192401abfSShai Brandes 	ena_com_delete_customer_metrics_buffer(ena_dev);
2362241da076SRafal Kozik err:
2363241da076SRafal Kozik 	return rc;
23641173fca2SJan Medala }
23651173fca2SJan Medala 
2366e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev)
2367eb0ef49dSMichal Krawczyk {
2368890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
2369e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
2370eb0ef49dSMichal Krawczyk 
2371e457bc70SRafal Kozik 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
2372e457bc70SRafal Kozik 		return;
2373e457bc70SRafal Kozik 
2374e457bc70SRafal Kozik 	ena_com_set_admin_running_state(ena_dev, false);
2375eb0ef49dSMichal Krawczyk 
2376eb0ef49dSMichal Krawczyk 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
2377eb0ef49dSMichal Krawczyk 		ena_close(eth_dev);
2378eb0ef49dSMichal Krawczyk 
237934d5e97eSMichal Krawczyk 	ena_com_rss_destroy(ena_dev);
238034d5e97eSMichal Krawczyk 
2381e457bc70SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
2382e457bc70SRafal Kozik 	ena_com_delete_host_info(ena_dev);
2383e457bc70SRafal Kozik 
2384e457bc70SRafal Kozik 	ena_com_abort_admin_commands(ena_dev);
2385e457bc70SRafal Kozik 	ena_com_wait_for_abort_completion(ena_dev);
2386e457bc70SRafal Kozik 	ena_com_admin_destroy(ena_dev);
2387e457bc70SRafal Kozik 	ena_com_mmio_reg_read_request_destroy(ena_dev);
238892401abfSShai Brandes 	ena_com_delete_customer_metrics_buffer(ena_dev);
2389e457bc70SRafal Kozik 
2390e457bc70SRafal Kozik 	adapter->state = ENA_ADAPTER_STATE_FREE;
2391e457bc70SRafal Kozik }
2392e457bc70SRafal Kozik 
2393e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
2394e457bc70SRafal Kozik {
2395e457bc70SRafal Kozik 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2396e457bc70SRafal Kozik 		return 0;
2397e457bc70SRafal Kozik 
2398e457bc70SRafal Kozik 	ena_destroy_device(eth_dev);
2399e457bc70SRafal Kozik 
2400eb0ef49dSMichal Krawczyk 	return 0;
2401eb0ef49dSMichal Krawczyk }
2402eb0ef49dSMichal Krawczyk 
24031173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
24041173fca2SJan Medala {
2405890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
2406b9b05d6fSMichal Krawczyk 	int rc;
24077369f88fSRafal Kozik 
24081173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
24091173fca2SJan Medala 
2410295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
2411295968d1SFerruh Yigit 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2412295968d1SFerruh Yigit 	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2413b418f0d2SMichal Krawczyk 
2414e2a6d08bSMichal Krawczyk 	/* Scattered Rx cannot be turned off in the HW, so this capability must
2415e2a6d08bSMichal Krawczyk 	 * be forced.
2416e2a6d08bSMichal Krawczyk 	 */
2417e2a6d08bSMichal Krawczyk 	dev->data->scattered_rx = 1;
2418e2a6d08bSMichal Krawczyk 
2419f93e20e5SMichal Krawczyk 	adapter->last_tx_comp_qid = 0;
2420f93e20e5SMichal Krawczyk 
2421f93e20e5SMichal Krawczyk 	adapter->missing_tx_completion_budget =
2422f93e20e5SMichal Krawczyk 		RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
2423f93e20e5SMichal Krawczyk 
2424f93e20e5SMichal Krawczyk 	/* To avoid detecting a spurious Tx completion timeout when the
2425f93e20e5SMichal Krawczyk 	 * application does not call the Tx cleanup function, set the per-queue
2426f93e20e5SMichal Krawczyk 	 * timeout to half of the missing completion timeout as a safety
2427f93e20e5SMichal Krawczyk 	 * margin. If there are many missing Tx completions in the queue, they
2428f93e20e5SMichal Krawczyk 	 * will still be detected sooner or later.
2429f93e20e5SMichal Krawczyk 	 */
2430f93e20e5SMichal Krawczyk 	adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
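	/* Example (assuming a 5 s missing Tx completion timeout): the stall
	 * delay becomes 2.5 s, so the watchdog inspects a queue for missing
	 * completions only if the application ran the Tx cleanup path within
	 * the last 2.5 s; otherwise the stall is attributed to the
	 * application itself rather than to the device.
	 */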
2431f93e20e5SMichal Krawczyk 
2432b9b05d6fSMichal Krawczyk 	rc = ena_configure_aenq(adapter);
2433b9b05d6fSMichal Krawczyk 
2434b9b05d6fSMichal Krawczyk 	return rc;
24351173fca2SJan Medala }
24361173fca2SJan Medala 
243733dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
243833dde075SMichal Krawczyk 			   bool disable_meta_caching)
24391173fca2SJan Medala {
24405920d930SMichal Krawczyk 	size_t i;
24411173fca2SJan Medala 
24425920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
24431173fca2SJan Medala 		struct ena_ring *ring = &adapter->tx_ring[i];
24441173fca2SJan Medala 
24451173fca2SJan Medala 		ring->configured = 0;
24461173fca2SJan Medala 		ring->type = ENA_RING_TYPE_TX;
24471173fca2SJan Medala 		ring->adapter = adapter;
24481173fca2SJan Medala 		ring->id = i;
24491173fca2SJan Medala 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
24501173fca2SJan Medala 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
24512061fe41SRafal Kozik 		ring->sgl_size = adapter->max_tx_sgl_size;
245233dde075SMichal Krawczyk 		ring->disable_meta_caching = disable_meta_caching;
24531173fca2SJan Medala 	}
24541173fca2SJan Medala 
24555920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
24561173fca2SJan Medala 		struct ena_ring *ring = &adapter->rx_ring[i];
24571173fca2SJan Medala 
24581173fca2SJan Medala 		ring->configured = 0;
24591173fca2SJan Medala 		ring->type = ENA_RING_TYPE_RX;
24601173fca2SJan Medala 		ring->adapter = adapter;
24611173fca2SJan Medala 		ring->id = i;
2462ea93d37eSRafal Kozik 		ring->sgl_size = adapter->max_rx_sgl_size;
24631173fca2SJan Medala 	}
24641173fca2SJan Medala }
24651173fca2SJan Medala 
24663a822d79SMichal Krawczyk static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
24673a822d79SMichal Krawczyk {
24683a822d79SMichal Krawczyk 	uint64_t port_offloads = 0;
24693a822d79SMichal Krawczyk 
24703a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
2471295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
24723a822d79SMichal Krawczyk 
24733a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads &
24743a822d79SMichal Krawczyk 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
24753a822d79SMichal Krawczyk 		port_offloads |=
2476295968d1SFerruh Yigit 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
24773a822d79SMichal Krawczyk 
24783a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
2479295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
24803a822d79SMichal Krawczyk 
2481295968d1SFerruh Yigit 	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
2482e2a6d08bSMichal Krawczyk 
24833a822d79SMichal Krawczyk 	return port_offloads;
24843a822d79SMichal Krawczyk }
24853a822d79SMichal Krawczyk 
24863a822d79SMichal Krawczyk static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
24873a822d79SMichal Krawczyk {
24883a822d79SMichal Krawczyk 	uint64_t port_offloads = 0;
24893a822d79SMichal Krawczyk 
24903a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
2491295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
24923a822d79SMichal Krawczyk 
24933a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
2494295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
24953a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads &
24963a822d79SMichal Krawczyk 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
24973a822d79SMichal Krawczyk 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
24983a822d79SMichal Krawczyk 		port_offloads |=
2499295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
25003a822d79SMichal Krawczyk 
2501295968d1SFerruh Yigit 	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
25023a822d79SMichal Krawczyk 
2503c339f538SDawid Gorecki 	port_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2504c339f538SDawid Gorecki 
25053a822d79SMichal Krawczyk 	return port_offloads;
25063a822d79SMichal Krawczyk }
25073a822d79SMichal Krawczyk 
25083a822d79SMichal Krawczyk static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
25093a822d79SMichal Krawczyk {
25103a822d79SMichal Krawczyk 	RTE_SET_USED(adapter);
25113a822d79SMichal Krawczyk 
25123a822d79SMichal Krawczyk 	return 0;
25133a822d79SMichal Krawczyk }
25143a822d79SMichal Krawczyk 
25153a822d79SMichal Krawczyk static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
25163a822d79SMichal Krawczyk {
2517c339f538SDawid Gorecki 	uint64_t queue_offloads = 0;
25183a822d79SMichal Krawczyk 	RTE_SET_USED(adapter);
25193a822d79SMichal Krawczyk 
2520c339f538SDawid Gorecki 	queue_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2521c339f538SDawid Gorecki 
2522c339f538SDawid Gorecki 	return queue_offloads;
25233a822d79SMichal Krawczyk }
25243a822d79SMichal Krawczyk 
2525bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
25261173fca2SJan Medala 			  struct rte_eth_dev_info *dev_info)
25271173fca2SJan Medala {
25281173fca2SJan Medala 	struct ena_adapter *adapter;
25291173fca2SJan Medala 	struct ena_com_dev *ena_dev;
25301173fca2SJan Medala 
2531498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2532498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2533890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
25341173fca2SJan Medala 
25351173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
2536498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
25371173fca2SJan Medala 
2538e274f573SMarc Sune 	dev_info->speed_capa =
2539295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_1G   |
2540295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_2_5G |
2541295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_5G   |
2542295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_10G  |
2543295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_25G  |
2544295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_40G  |
2545295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_50G  |
2546295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_100G;
2547e274f573SMarc Sune 
25481173fca2SJan Medala 	/* Inform framework about available features */
25493a822d79SMichal Krawczyk 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
25503a822d79SMichal Krawczyk 	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
25513a822d79SMichal Krawczyk 	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
25523a822d79SMichal Krawczyk 	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
25531173fca2SJan Medala 
255434d5e97eSMichal Krawczyk 	dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
255534d5e97eSMichal Krawczyk 	dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
2556b01ead20SRafal Kozik 
25571173fca2SJan Medala 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
25581bb4a528SFerruh Yigit 	dev_info->max_rx_pktlen  = adapter->max_mtu + RTE_ETHER_HDR_LEN +
25591bb4a528SFerruh Yigit 		RTE_ETHER_CRC_LEN;
25601bb4a528SFerruh Yigit 	dev_info->min_mtu = ENA_MIN_MTU;
25611bb4a528SFerruh Yigit 	dev_info->max_mtu = adapter->max_mtu;
25621173fca2SJan Medala 	dev_info->max_mac_addrs = 1;
25631173fca2SJan Medala 
25645920d930SMichal Krawczyk 	dev_info->max_rx_queues = adapter->max_num_io_queues;
25655920d930SMichal Krawczyk 	dev_info->max_tx_queues = adapter->max_num_io_queues;
25661173fca2SJan Medala 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
256756b8b9b7SRafal Kozik 
25685920d930SMichal Krawczyk 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
256992680dc2SRafal Kozik 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2570ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2571ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
2572ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2573ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
257492680dc2SRafal Kozik 
25755920d930SMichal Krawczyk 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
257692680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
257792680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2578ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
257992680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2580ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
2581bdad90d1SIvan Ilchenko 
258230a6c7efSStanislaw Kardach 	dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE;
258330a6c7efSStanislaw Kardach 	dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE;
258430a6c7efSStanislaw Kardach 
25850d5c38baSChengwen Feng 	dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE;
25860d5c38baSChengwen Feng 
2587bdad90d1SIvan Ilchenko 	return 0;
25881173fca2SJan Medala }
25891173fca2SJan Medala 
25901be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
25911be097dcSMichal Krawczyk {
25921be097dcSMichal Krawczyk 	mbuf->data_len = len;
25931be097dcSMichal Krawczyk 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
25941be097dcSMichal Krawczyk 	mbuf->refcnt = 1;
25951be097dcSMichal Krawczyk 	mbuf->next = NULL;
25961be097dcSMichal Krawczyk }
25971be097dcSMichal Krawczyk 
25981be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
25991be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
26001be097dcSMichal Krawczyk 				    uint32_t descs,
26011be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
26021be097dcSMichal Krawczyk 				    uint8_t offset)
26031be097dcSMichal Krawczyk {
26041be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
26051be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf_head;
26061be097dcSMichal Krawczyk 	struct ena_rx_buffer *rx_info;
260783fd97b2SMichal Krawczyk 	int rc;
26081be097dcSMichal Krawczyk 	uint16_t ntc, len, req_id, buf = 0;
26091be097dcSMichal Krawczyk 
26101be097dcSMichal Krawczyk 	if (unlikely(descs == 0))
26111be097dcSMichal Krawczyk 		return NULL;
26121be097dcSMichal Krawczyk 
26131be097dcSMichal Krawczyk 	ntc = *next_to_clean;
26141be097dcSMichal Krawczyk 
26151be097dcSMichal Krawczyk 	len = ena_bufs[buf].len;
26161be097dcSMichal Krawczyk 	req_id = ena_bufs[buf].req_id;
26171be097dcSMichal Krawczyk 
26181be097dcSMichal Krawczyk 	rx_info = &rx_ring->rx_buffer_info[req_id];
26191be097dcSMichal Krawczyk 
26201be097dcSMichal Krawczyk 	mbuf = rx_info->mbuf;
26211be097dcSMichal Krawczyk 	RTE_ASSERT(mbuf != NULL);
26221be097dcSMichal Krawczyk 
26231be097dcSMichal Krawczyk 	ena_init_rx_mbuf(mbuf, len);
26241be097dcSMichal Krawczyk 
26251be097dcSMichal Krawczyk 	/* Fill the mbuf head with the data specific to the 1st segment. */
26261be097dcSMichal Krawczyk 	mbuf_head = mbuf;
26271be097dcSMichal Krawczyk 	mbuf_head->nb_segs = descs;
26281be097dcSMichal Krawczyk 	mbuf_head->port = rx_ring->port_id;
26291be097dcSMichal Krawczyk 	mbuf_head->pkt_len = len;
26301be097dcSMichal Krawczyk 	mbuf_head->data_off += offset;
26311be097dcSMichal Krawczyk 
26321be097dcSMichal Krawczyk 	rx_info->mbuf = NULL;
2633c0006061SMichal Krawczyk 	rx_ring->empty_rx_reqs[ntc] = req_id;
2634c0006061SMichal Krawczyk 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
26351be097dcSMichal Krawczyk 
26361be097dcSMichal Krawczyk 	while (--descs) {
26371be097dcSMichal Krawczyk 		++buf;
26381be097dcSMichal Krawczyk 		len = ena_bufs[buf].len;
26391be097dcSMichal Krawczyk 		req_id = ena_bufs[buf].req_id;
26401be097dcSMichal Krawczyk 
26411be097dcSMichal Krawczyk 		rx_info = &rx_ring->rx_buffer_info[req_id];
26421be097dcSMichal Krawczyk 		RTE_ASSERT(rx_info->mbuf != NULL);
26431be097dcSMichal Krawczyk 
264483fd97b2SMichal Krawczyk 		if (unlikely(len == 0)) {
264583fd97b2SMichal Krawczyk 			/*
264683fd97b2SMichal Krawczyk 			 * Some devices can pass a descriptor with length 0.
264783fd97b2SMichal Krawczyk 			 * To avoid confusion, the PMD simply puts the
264883fd97b2SMichal Krawczyk 			 * descriptor back, as it was never used; that way
264983fd97b2SMichal Krawczyk 			 * the mbuf allocation is avoided.
265083fd97b2SMichal Krawczyk 			 */
265183fd97b2SMichal Krawczyk 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
265283fd97b2SMichal Krawczyk 				rx_info->mbuf, req_id);
265383fd97b2SMichal Krawczyk 			if (unlikely(rc != 0)) {
265483fd97b2SMichal Krawczyk 				/* Free the mbuf in case of an error. */
265583fd97b2SMichal Krawczyk 				rte_mbuf_raw_free(rx_info->mbuf);
265683fd97b2SMichal Krawczyk 			} else {
265783fd97b2SMichal Krawczyk 				/*
265883fd97b2SMichal Krawczyk 				 * If there was no error, just exit the loop as
265983fd97b2SMichal Krawczyk 				 * 0 length descriptor is always the last one.
266083fd97b2SMichal Krawczyk 				 */
266183fd97b2SMichal Krawczyk 				break;
266283fd97b2SMichal Krawczyk 			}
266383fd97b2SMichal Krawczyk 		} else {
26641be097dcSMichal Krawczyk 			/* Create an mbuf chain. */
26651be097dcSMichal Krawczyk 			mbuf->next = rx_info->mbuf;
26661be097dcSMichal Krawczyk 			mbuf = mbuf->next;
26671be097dcSMichal Krawczyk 
26681be097dcSMichal Krawczyk 			ena_init_rx_mbuf(mbuf, len);
26691be097dcSMichal Krawczyk 			mbuf_head->pkt_len += len;
267083fd97b2SMichal Krawczyk 		}
26711be097dcSMichal Krawczyk 
267283fd97b2SMichal Krawczyk 		/*
267383fd97b2SMichal Krawczyk 		 * Mark the descriptor as depleted and perform necessary
267483fd97b2SMichal Krawczyk 		 * cleanup.
267583fd97b2SMichal Krawczyk 		 * This code will execute in two cases:
267683fd97b2SMichal Krawczyk 		 *  1. Descriptor len was greater than 0 - normal situation.
267783fd97b2SMichal Krawczyk 		 *  2. Descriptor len was 0 and we failed to add the descriptor
267883fd97b2SMichal Krawczyk 		 *     to the device. In that situation, we should try to add
267983fd97b2SMichal Krawczyk 		 *     the mbuf again in the populate routine and mark the
268083fd97b2SMichal Krawczyk 		 *     descriptor as used up by the device.
268183fd97b2SMichal Krawczyk 		 */
26821be097dcSMichal Krawczyk 		rx_info->mbuf = NULL;
2683c0006061SMichal Krawczyk 		rx_ring->empty_rx_reqs[ntc] = req_id;
2684c0006061SMichal Krawczyk 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
26851be097dcSMichal Krawczyk 	}
26861be097dcSMichal Krawczyk 
26871be097dcSMichal Krawczyk 	*next_to_clean = ntc;
26881be097dcSMichal Krawczyk 
26891be097dcSMichal Krawczyk 	return mbuf_head;
26901be097dcSMichal Krawczyk }
26911be097dcSMichal Krawczyk 
26921173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
26931173fca2SJan Medala 				  uint16_t nb_pkts)
26941173fca2SJan Medala {
26951173fca2SJan Medala 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
269677550607SMichal Krawczyk 	unsigned int free_queue_entries;
26971173fca2SJan Medala 	uint16_t next_to_clean = rx_ring->next_to_clean;
269874456796SMichal Krawczyk 	uint16_t descs_in_use;
26991be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
27001be097dcSMichal Krawczyk 	uint16_t completed;
27011173fca2SJan Medala 	struct ena_com_rx_ctx ena_rx_ctx;
27021be097dcSMichal Krawczyk 	int i, rc = 0;
270334d5e97eSMichal Krawczyk 	bool fill_hash;
27041173fca2SJan Medala 
27050a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
27061173fca2SJan Medala 	/* Check adapter state */
27071173fca2SJan Medala 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
27080a001d69SMichal Krawczyk 		PMD_RX_LOG(ALERT,
27091173fca2SJan Medala 			"Trying to receive pkts while device is NOT running\n");
27101173fca2SJan Medala 		return 0;
27111173fca2SJan Medala 	}
27120a001d69SMichal Krawczyk #endif
27131173fca2SJan Medala 
2714295968d1SFerruh Yigit 	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
271534d5e97eSMichal Krawczyk 
2716c0006061SMichal Krawczyk 	descs_in_use = rx_ring->ring_size -
271774456796SMichal Krawczyk 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
271874456796SMichal Krawczyk 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
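	/* Illustration: with a 1024-entry ring and 1000 free SQ entries,
	 * descs_in_use = 1024 - 1000 - 1 = 23, so the burst is clamped to at
	 * most 23 packets even if the caller requested more.
	 */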
27191173fca2SJan Medala 
27201173fca2SJan Medala 	for (completed = 0; completed < nb_pkts; completed++) {
2721ea93d37eSRafal Kozik 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
27221173fca2SJan Medala 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
27231173fca2SJan Medala 		ena_rx_ctx.descs = 0;
27247b3a3c4bSMaciej Bielski 		ena_rx_ctx.pkt_offset = 0;
27251173fca2SJan Medala 		/* receive packet context */
27261173fca2SJan Medala 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
27271173fca2SJan Medala 				    rx_ring->ena_com_io_sq,
27281173fca2SJan Medala 				    &ena_rx_ctx);
27291173fca2SJan Medala 		if (unlikely(rc)) {
27300a001d69SMichal Krawczyk 			PMD_RX_LOG(ERR,
2731617898d1SMichal Krawczyk 				"Failed to get the packet from the device, rc: %d\n",
2732617898d1SMichal Krawczyk 				rc);
273305cffdcfSMichal Krawczyk 			if (rc == ENA_COM_NO_SPACE) {
273405cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_desc_num;
27352bae75eaSDawid Gorecki 				ena_trigger_reset(rx_ring->adapter,
27362bae75eaSDawid Gorecki 					ENA_REGS_RESET_TOO_MANY_RX_DESCS);
273705cffdcfSMichal Krawczyk 			} else {
273805cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_req_id;
27392bae75eaSDawid Gorecki 				ena_trigger_reset(rx_ring->adapter,
27402bae75eaSDawid Gorecki 					ENA_REGS_RESET_INV_RX_REQ_ID);
274105cffdcfSMichal Krawczyk 			}
27421173fca2SJan Medala 			return 0;
27431173fca2SJan Medala 		}
27441173fca2SJan Medala 
27451be097dcSMichal Krawczyk 		mbuf = ena_rx_mbuf(rx_ring,
27461be097dcSMichal Krawczyk 			ena_rx_ctx.ena_bufs,
27471be097dcSMichal Krawczyk 			ena_rx_ctx.descs,
27481be097dcSMichal Krawczyk 			&next_to_clean,
27491be097dcSMichal Krawczyk 			ena_rx_ctx.pkt_offset);
27501be097dcSMichal Krawczyk 		if (unlikely(mbuf == NULL)) {
27511be097dcSMichal Krawczyk 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2752c0006061SMichal Krawczyk 				rx_ring->empty_rx_reqs[next_to_clean] =
27531be097dcSMichal Krawczyk 					rx_ring->ena_bufs[i].req_id;
2754c0006061SMichal Krawczyk 				next_to_clean = ENA_IDX_NEXT_MASKED(
2755c0006061SMichal Krawczyk 					next_to_clean, rx_ring->size_mask);
27561173fca2SJan Medala 			}
2757f00930d9SRafal Kozik 			break;
27581be097dcSMichal Krawczyk 		}
27591173fca2SJan Medala 
27601173fca2SJan Medala 		/* fill mbuf attributes if any */
276184daba99SMichal Krawczyk 		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
27627830e905SSolganik Alexander 
27631be097dcSMichal Krawczyk 		if (unlikely(mbuf->ol_flags &
276484daba99SMichal Krawczyk 				(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))
2765ef74b5f7SMichal Krawczyk 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
27667830e905SSolganik Alexander 
27671be097dcSMichal Krawczyk 		rx_pkts[completed] = mbuf;
27681be097dcSMichal Krawczyk 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
27691173fca2SJan Medala 	}
27701173fca2SJan Medala 
27711be097dcSMichal Krawczyk 	rx_ring->rx_stats.cnt += completed;
2772ec78af6bSMichal Krawczyk 	rx_ring->next_to_clean = next_to_clean;
2773ec78af6bSMichal Krawczyk 
277477550607SMichal Krawczyk 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
277577550607SMichal Krawczyk 
27761173fca2SJan Medala 	/* Burst refill to save doorbells, memory barriers, const interval */
2777005064e5SMichal Krawczyk 	if (free_queue_entries >= rx_ring->rx_free_thresh) {
277877550607SMichal Krawczyk 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2779a45462c5SRafal Kozik 	}
27801173fca2SJan Medala 
27811be097dcSMichal Krawczyk 	return completed;
27821173fca2SJan Medala }
27831173fca2SJan Medala 
2784b3fc5a1aSKonstantin Ananyev static uint16_t
278583277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2786b3fc5a1aSKonstantin Ananyev 		uint16_t nb_pkts)
2787b3fc5a1aSKonstantin Ananyev {
2788b3fc5a1aSKonstantin Ananyev 	int32_t ret;
2789b3fc5a1aSKonstantin Ananyev 	uint32_t i;
2790b3fc5a1aSKonstantin Ananyev 	struct rte_mbuf *m;
279183277a7cSJakub Palider 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2792e8c838fdSMichal Krawczyk 	struct ena_adapter *adapter = tx_ring->adapter;
2793a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ip_hdr;
2794b3fc5a1aSKonstantin Ananyev 	uint64_t ol_flags;
2795e8c838fdSMichal Krawczyk 	uint64_t l4_csum_flag;
2796e8c838fdSMichal Krawczyk 	uint64_t dev_offload_capa;
279783277a7cSJakub Palider 	uint16_t frag_field;
2798e8c838fdSMichal Krawczyk 	bool need_pseudo_csum;
279983277a7cSJakub Palider 
2800e8c838fdSMichal Krawczyk 	dev_offload_capa = adapter->offloads.tx_offloads;
2801b3fc5a1aSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
2802b3fc5a1aSKonstantin Ananyev 		m = tx_pkts[i];
2803b3fc5a1aSKonstantin Ananyev 		ol_flags = m->ol_flags;
2804b3fc5a1aSKonstantin Ananyev 
2805e8c838fdSMichal Krawczyk 		/* Check if any offload flag was set */
2806e8c838fdSMichal Krawczyk 		if (ol_flags == 0)
2807bc5ef57dSMichal Krawczyk 			continue;
2808bc5ef57dSMichal Krawczyk 
2809daa02b5cSOlivier Matz 		l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
2810e8c838fdSMichal Krawczyk 		/* SCTP checksum offload is not supported by the ENA. */
2811e8c838fdSMichal Krawczyk 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
2812daa02b5cSOlivier Matz 		    l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
2813e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2814e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n",
2815e8c838fdSMichal Krawczyk 				i, ol_flags);
2816baeed5f4SMichal Krawczyk 			rte_errno = ENOTSUP;
2817b3fc5a1aSKonstantin Ananyev 			return i;
2818b3fc5a1aSKonstantin Ananyev 		}
2819b3fc5a1aSKonstantin Ananyev 
282096ffa8a7SMichal Krawczyk 		if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
282196ffa8a7SMichal Krawczyk 		    !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
282296ffa8a7SMichal Krawczyk 		      m->nb_segs == tx_ring->sgl_size &&
282396ffa8a7SMichal Krawczyk 		      m->data_len < tx_ring->tx_max_header_size))) {
282496ffa8a7SMichal Krawczyk 			PMD_TX_LOG(DEBUG,
282596ffa8a7SMichal Krawczyk 				"mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
282696ffa8a7SMichal Krawczyk 				i, m->nb_segs);
282796ffa8a7SMichal Krawczyk 			rte_errno = EINVAL;
282896ffa8a7SMichal Krawczyk 			return i;
282996ffa8a7SMichal Krawczyk 		}
283096ffa8a7SMichal Krawczyk 
2831b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2832e8c838fdSMichal Krawczyk 		/* Check if requested offload is also enabled for the queue */
2833daa02b5cSOlivier Matz 		if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2834295968d1SFerruh Yigit 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
2835daa02b5cSOlivier Matz 		    (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
2836295968d1SFerruh Yigit 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
2837daa02b5cSOlivier Matz 		    (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
2838295968d1SFerruh Yigit 		     !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
2839e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2840e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
2841e8c838fdSMichal Krawczyk 				i, m->nb_segs, tx_ring->id);
2842e8c838fdSMichal Krawczyk 			rte_errno = EINVAL;
2843e8c838fdSMichal Krawczyk 			return i;
2844e8c838fdSMichal Krawczyk 		}
2845e8c838fdSMichal Krawczyk 
2846e8c838fdSMichal Krawczyk 		/* The caller is obligated to set l2 and l3 len if any cksum
2847e8c838fdSMichal Krawczyk 		 * offload is enabled.
2848e8c838fdSMichal Krawczyk 		 */
2849daa02b5cSOlivier Matz 		if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
2850e8c838fdSMichal Krawczyk 		    (m->l2_len == 0 || m->l3_len == 0))) {
2851e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2852e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
2853e8c838fdSMichal Krawczyk 				i);
2854e8c838fdSMichal Krawczyk 			rte_errno = EINVAL;
2855e8c838fdSMichal Krawczyk 			return i;
2856e8c838fdSMichal Krawczyk 		}
2857b3fc5a1aSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
2858b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2859baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2860b3fc5a1aSKonstantin Ananyev 			return i;
2861b3fc5a1aSKonstantin Ananyev 		}
2862b3fc5a1aSKonstantin Ananyev #endif
286383277a7cSJakub Palider 
2864e8c838fdSMichal Krawczyk 		/* Verify HW support for requested offloads and determine if
2865e8c838fdSMichal Krawczyk 		 * pseudo header checksum is needed.
286683277a7cSJakub Palider 		 */
2867e8c838fdSMichal Krawczyk 		need_pseudo_csum = false;
2868daa02b5cSOlivier Matz 		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2869daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2870e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
2871e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2872e8c838fdSMichal Krawczyk 				return i;
2873e8c838fdSMichal Krawczyk 			}
287483277a7cSJakub Palider 
2875daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
2876e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_IPV4_TSO)) {
2877e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2878e8c838fdSMichal Krawczyk 				return i;
2879e8c838fdSMichal Krawczyk 			}
2880e8c838fdSMichal Krawczyk 
2881e8c838fdSMichal Krawczyk 			/* Check HW capabilities and if pseudo csum is needed
2882e8c838fdSMichal Krawczyk 			 * for L4 offloads.
2883e8c838fdSMichal Krawczyk 			 */
2884daa02b5cSOlivier Matz 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2885e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
2886e8c838fdSMichal Krawczyk 				if (dev_offload_capa &
2887e8c838fdSMichal Krawczyk 				    ENA_L4_IPV4_CSUM_PARTIAL) {
2888e8c838fdSMichal Krawczyk 					need_pseudo_csum = true;
2889e8c838fdSMichal Krawczyk 				} else {
2890e8c838fdSMichal Krawczyk 					rte_errno = ENOTSUP;
2891e8c838fdSMichal Krawczyk 					return i;
2892e8c838fdSMichal Krawczyk 				}
2893e8c838fdSMichal Krawczyk 			}
2894e8c838fdSMichal Krawczyk 
2895e8c838fdSMichal Krawczyk 			/* Parse the DF flag */
2896e8c838fdSMichal Krawczyk 			ip_hdr = rte_pktmbuf_mtod_offset(m,
2897e8c838fdSMichal Krawczyk 				struct rte_ipv4_hdr *, m->l2_len);
2898e8c838fdSMichal Krawczyk 			frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2899e8c838fdSMichal Krawczyk 			if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
2900e8c838fdSMichal Krawczyk 				m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2901daa02b5cSOlivier Matz 			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2902e8c838fdSMichal Krawczyk 				/* When TSO is requested and the DF flag is
2903e8c838fdSMichal Krawczyk 				 * not set (DF=0), the hardware must be
2904e8c838fdSMichal Krawczyk 				 * provided with a partial checksum.
2905e8c838fdSMichal Krawczyk 				 */
2906e8c838fdSMichal Krawczyk 				need_pseudo_csum = true;
2907e8c838fdSMichal Krawczyk 			}
2908daa02b5cSOlivier Matz 		} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2909e8c838fdSMichal Krawczyk 			/* There is no support for IPv6 TSO as for now. */
2910daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2911e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2912e8c838fdSMichal Krawczyk 				return i;
2913e8c838fdSMichal Krawczyk 			}
2914e8c838fdSMichal Krawczyk 
2915e8c838fdSMichal Krawczyk 			/* Check HW capabilities and if pseudo csum is needed */
2916daa02b5cSOlivier Matz 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2917e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
2918e8c838fdSMichal Krawczyk 				if (dev_offload_capa &
2919e8c838fdSMichal Krawczyk 				    ENA_L4_IPV6_CSUM_PARTIAL) {
2920e8c838fdSMichal Krawczyk 					need_pseudo_csum = true;
2921e8c838fdSMichal Krawczyk 				} else {
2922e8c838fdSMichal Krawczyk 					rte_errno = ENOTSUP;
2923e8c838fdSMichal Krawczyk 					return i;
2924e8c838fdSMichal Krawczyk 				}
2925e8c838fdSMichal Krawczyk 			}
2926e8c838fdSMichal Krawczyk 		}
2927e8c838fdSMichal Krawczyk 
2928e8c838fdSMichal Krawczyk 		if (need_pseudo_csum) {
2929e8c838fdSMichal Krawczyk 			ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
2930b3fc5a1aSKonstantin Ananyev 			if (ret != 0) {
2931baeed5f4SMichal Krawczyk 				rte_errno = -ret;
2932b3fc5a1aSKonstantin Ananyev 				return i;
2933b3fc5a1aSKonstantin Ananyev 			}
2934b3fc5a1aSKonstantin Ananyev 		}
2935e8c838fdSMichal Krawczyk 	}
2936b3fc5a1aSKonstantin Ananyev 
2937b3fc5a1aSKonstantin Ananyev 	return i;
2938b3fc5a1aSKonstantin Ananyev }
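/*
 * Usage sketch (application side, illustrative): the validation above runs
 * when the application calls rte_eth_tx_prepare() ahead of the burst, e.g.:
 *
 *   uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *   if (nb_prep != nb_pkts)
 *       drop_or_fix(pkts[nb_prep], rte_errno); // hypothetical handler
 *   uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * rte_errno carries the reason (ENOTSUP or EINVAL) set by the checks above
 * for the first offending mbuf.
 */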
2939b3fc5a1aSKonstantin Ananyev 
2940f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter,
2941f01f060cSRafal Kozik 			     struct ena_admin_ena_hw_hints *hints)
2942f01f060cSRafal Kozik {
2943f01f060cSRafal Kozik 	if (hints->admin_completion_tx_timeout)
2944f01f060cSRafal Kozik 		adapter->ena_dev.admin_queue.completion_timeout =
2945f01f060cSRafal Kozik 			hints->admin_completion_tx_timeout * 1000;
2946f01f060cSRafal Kozik 
2947f01f060cSRafal Kozik 	if (hints->mmio_read_timeout)
2948f01f060cSRafal Kozik 		/* convert to usec */
2949f01f060cSRafal Kozik 		adapter->ena_dev.mmio_read.reg_read_to =
2950f01f060cSRafal Kozik 			hints->mmio_read_timeout * 1000;
2951d9b8b106SMichal Krawczyk 
2952d9b8b106SMichal Krawczyk 	if (hints->driver_watchdog_timeout) {
2953d9b8b106SMichal Krawczyk 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2954d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2955d9b8b106SMichal Krawczyk 		else
2956d9b8b106SMichal Krawczyk 			// Convert msecs to ticks
2957d9b8b106SMichal Krawczyk 			/* Convert msecs to ticks */
2958d9b8b106SMichal Krawczyk 				(hints->driver_watchdog_timeout *
2959d9b8b106SMichal Krawczyk 				rte_get_timer_hz()) / 1000;
2960d9b8b106SMichal Krawczyk 	}
2961f01f060cSRafal Kozik }
2962f01f060cSRafal Kozik 
296336278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
296436278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info,
296536278b82SMichal Krawczyk 	struct rte_mbuf *mbuf,
296636278b82SMichal Krawczyk 	void **push_header,
296736278b82SMichal Krawczyk 	uint16_t *header_len)
296836278b82SMichal Krawczyk {
296936278b82SMichal Krawczyk 	struct ena_com_buf *ena_buf;
297036278b82SMichal Krawczyk 	uint16_t delta, seg_len, push_len;
297136278b82SMichal Krawczyk 
297236278b82SMichal Krawczyk 	delta = 0;
297336278b82SMichal Krawczyk 	seg_len = mbuf->data_len;
297436278b82SMichal Krawczyk 
297536278b82SMichal Krawczyk 	tx_info->mbuf = mbuf;
297636278b82SMichal Krawczyk 	ena_buf = tx_info->bufs;
297736278b82SMichal Krawczyk 
297836278b82SMichal Krawczyk 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
297936278b82SMichal Krawczyk 		/*
298036278b82SMichal Krawczyk 		 * The Tx header might be (and in most cases will be) smaller
298136278b82SMichal Krawczyk 		 * than tx_max_header_size. It is not an issue to send more
298236278b82SMichal Krawczyk 		 * data to the device than actually needed when the mbuf size
298336278b82SMichal Krawczyk 		 * is greater than tx_max_header_size.
298436278b82SMichal Krawczyk 		 */
298536278b82SMichal Krawczyk 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
298636278b82SMichal Krawczyk 		*header_len = push_len;
298736278b82SMichal Krawczyk 
298836278b82SMichal Krawczyk 		if (likely(push_len <= seg_len)) {
298936278b82SMichal Krawczyk 			/* If the push header is in the single segment, then
299036278b82SMichal Krawczyk 			 * just point it to the 1st mbuf data.
299136278b82SMichal Krawczyk 			 */
299236278b82SMichal Krawczyk 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
299336278b82SMichal Krawczyk 		} else {
299436278b82SMichal Krawczyk 			/* If the push header spans several segments, copy
299536278b82SMichal Krawczyk 			 * it to the intermediate buffer.
299636278b82SMichal Krawczyk 			 */
299736278b82SMichal Krawczyk 			rte_pktmbuf_read(mbuf, 0, push_len,
299836278b82SMichal Krawczyk 				tx_ring->push_buf_intermediate_buf);
299936278b82SMichal Krawczyk 			*push_header = tx_ring->push_buf_intermediate_buf;
300036278b82SMichal Krawczyk 			delta = push_len - seg_len;
300136278b82SMichal Krawczyk 		}
300236278b82SMichal Krawczyk 	} else {
300336278b82SMichal Krawczyk 		*push_header = NULL;
300436278b82SMichal Krawczyk 		*header_len = 0;
300536278b82SMichal Krawczyk 		push_len = 0;
300636278b82SMichal Krawczyk 	}
300736278b82SMichal Krawczyk 
300836278b82SMichal Krawczyk 	/* Process first segment taking into consideration pushed header */
300936278b82SMichal Krawczyk 	if (seg_len > push_len) {
301036278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova +
301136278b82SMichal Krawczyk 				mbuf->data_off +
301236278b82SMichal Krawczyk 				push_len;
301336278b82SMichal Krawczyk 		ena_buf->len = seg_len - push_len;
301436278b82SMichal Krawczyk 		ena_buf++;
301536278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
301636278b82SMichal Krawczyk 	}
301736278b82SMichal Krawczyk 
301836278b82SMichal Krawczyk 	while ((mbuf = mbuf->next) != NULL) {
301936278b82SMichal Krawczyk 		seg_len = mbuf->data_len;
302036278b82SMichal Krawczyk 
302136278b82SMichal Krawczyk 		/* Skip mbufs if whole data is pushed as a header */
302236278b82SMichal Krawczyk 		if (unlikely(delta > seg_len)) {
302336278b82SMichal Krawczyk 			delta -= seg_len;
302436278b82SMichal Krawczyk 			continue;
302536278b82SMichal Krawczyk 		}
302636278b82SMichal Krawczyk 
302736278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
302836278b82SMichal Krawczyk 		ena_buf->len = seg_len - delta;
302936278b82SMichal Krawczyk 		ena_buf++;
303036278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
303136278b82SMichal Krawczyk 
303236278b82SMichal Krawczyk 		delta = 0;
303336278b82SMichal Krawczyk 	}
303436278b82SMichal Krawczyk }
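/*
 * Worked example with hypothetical sizes: for an LLQ ring with
 * tx_max_header_size = 96 and a two-segment mbuf of data_len 64 + 100
 * (pkt_len = 164), push_len = min(164, 96) = 96. Since 96 > 64, the header
 * is assembled in push_buf_intermediate_buf and delta = 96 - 64 = 32. The
 * first segment is fully consumed by the push header, and the second one is
 * mapped at offset 32 with length 100 - 32 = 68.
 */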
303536278b82SMichal Krawczyk 
303636278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
303736278b82SMichal Krawczyk {
303836278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info;
303936278b82SMichal Krawczyk 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
304036278b82SMichal Krawczyk 	uint16_t next_to_use;
304136278b82SMichal Krawczyk 	uint16_t header_len;
304236278b82SMichal Krawczyk 	uint16_t req_id;
304336278b82SMichal Krawczyk 	void *push_header;
304436278b82SMichal Krawczyk 	int nb_hw_desc;
304536278b82SMichal Krawczyk 	int rc;
304636278b82SMichal Krawczyk 
304796ffa8a7SMichal Krawczyk 	/* Check for space for 2 additional descriptors: one for a possible
304896ffa8a7SMichal Krawczyk 	 * header split and one for the metadata descriptor
304996ffa8a7SMichal Krawczyk 	 */
305096ffa8a7SMichal Krawczyk 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
305196ffa8a7SMichal Krawczyk 					  mbuf->nb_segs + 2)) {
305296ffa8a7SMichal Krawczyk 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
305396ffa8a7SMichal Krawczyk 		return ENA_COM_NO_MEM;
305496ffa8a7SMichal Krawczyk 	}
305536278b82SMichal Krawczyk 
305636278b82SMichal Krawczyk 	next_to_use = tx_ring->next_to_use;
305736278b82SMichal Krawczyk 
305836278b82SMichal Krawczyk 	req_id = tx_ring->empty_tx_reqs[next_to_use];
305936278b82SMichal Krawczyk 	tx_info = &tx_ring->tx_buffer_info[req_id];
306036278b82SMichal Krawczyk 	tx_info->num_of_bufs = 0;
30613d47e9b1SMichal Krawczyk 	RTE_ASSERT(tx_info->mbuf == NULL);
306236278b82SMichal Krawczyk 
306336278b82SMichal Krawczyk 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
306436278b82SMichal Krawczyk 
306536278b82SMichal Krawczyk 	ena_tx_ctx.ena_bufs = tx_info->bufs;
306636278b82SMichal Krawczyk 	ena_tx_ctx.push_header = push_header;
306736278b82SMichal Krawczyk 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
306836278b82SMichal Krawczyk 	ena_tx_ctx.req_id = req_id;
306936278b82SMichal Krawczyk 	ena_tx_ctx.header_len = header_len;
307036278b82SMichal Krawczyk 
307136278b82SMichal Krawczyk 	/* Set Tx offloads flags, if applicable */
307236278b82SMichal Krawczyk 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
307336278b82SMichal Krawczyk 		tx_ring->disable_meta_caching);
307436278b82SMichal Krawczyk 
307536278b82SMichal Krawczyk 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
307636278b82SMichal Krawczyk 			&ena_tx_ctx))) {
30770a001d69SMichal Krawczyk 		PMD_TX_LOG(DEBUG,
3078617898d1SMichal Krawczyk 			"LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
307936278b82SMichal Krawczyk 			tx_ring->id);
308036278b82SMichal Krawczyk 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
30811f949ad9SAmit Bernstein 		tx_ring->tx_stats.doorbells++;
30821d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = false;
308336278b82SMichal Krawczyk 	}
308436278b82SMichal Krawczyk 
308536278b82SMichal Krawczyk 	/* Prepare the packet's descriptors for the DMA engine */
308636278b82SMichal Krawczyk 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,	&ena_tx_ctx,
308736278b82SMichal Krawczyk 		&nb_hw_desc);
308836278b82SMichal Krawczyk 	if (unlikely(rc)) {
3089b57e1053SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
309036278b82SMichal Krawczyk 		++tx_ring->tx_stats.prepare_ctx_err;
30912bae75eaSDawid Gorecki 		ena_trigger_reset(tx_ring->adapter,
30922bae75eaSDawid Gorecki 			ENA_REGS_RESET_DRIVER_INVALID_STATE);
309336278b82SMichal Krawczyk 		return rc;
309436278b82SMichal Krawczyk 	}
309536278b82SMichal Krawczyk 
309636278b82SMichal Krawczyk 	tx_info->tx_descs = nb_hw_desc;
3097f93e20e5SMichal Krawczyk 	tx_info->timestamp = rte_get_timer_cycles();
309836278b82SMichal Krawczyk 
309936278b82SMichal Krawczyk 	tx_ring->tx_stats.cnt++;
310036278b82SMichal Krawczyk 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
310136278b82SMichal Krawczyk 
310236278b82SMichal Krawczyk 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
310336278b82SMichal Krawczyk 		tx_ring->size_mask);
310436278b82SMichal Krawczyk 
310536278b82SMichal Krawczyk 	return 0;
310636278b82SMichal Krawczyk }
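/*
 * Note on doorbells: the write above happens only when the LLQ burst limit
 * forces it. Otherwise the doorbell is deferred to the end of
 * eth_ena_xmit_pkts() (tracked via pkts_without_db), so a single doorbell
 * can cover the whole burst.
 */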
310736278b82SMichal Krawczyk 
3108c339f538SDawid Gorecki static __rte_always_inline size_t
3109c339f538SDawid Gorecki ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean,
3110c339f538SDawid Gorecki 			 struct rte_mbuf *mbuf,
3111c339f538SDawid Gorecki 			 size_t mbuf_cnt,
3112c339f538SDawid Gorecki 			 size_t buf_size)
3113c339f538SDawid Gorecki {
3114c339f538SDawid Gorecki 	struct rte_mbuf *m_next;
3115c339f538SDawid Gorecki 
3116c339f538SDawid Gorecki 	while (mbuf != NULL) {
3117c339f538SDawid Gorecki 		m_next = mbuf->next;
3118c339f538SDawid Gorecki 		mbufs_to_clean[mbuf_cnt++] = mbuf;
3119c339f538SDawid Gorecki 		if (mbuf_cnt == buf_size) {
3120c339f538SDawid Gorecki 			rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean,
3121c339f538SDawid Gorecki 				(unsigned int)mbuf_cnt);
3122c339f538SDawid Gorecki 			mbuf_cnt = 0;
3123c339f538SDawid Gorecki 		}
3124c339f538SDawid Gorecki 		mbuf = m_next;
3125c339f538SDawid Gorecki 	}
3126c339f538SDawid Gorecki 
3127c339f538SDawid Gorecki 	return mbuf_cnt;
3128c339f538SDawid Gorecki }
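/*
 * Note: the bulk free above relies on the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 * contract - all mbufs sent on this queue must come from a single mempool
 * and must not be referenced elsewhere (refcnt == 1) - which is why putting
 * the whole batch back into mbufs_to_clean[0]->pool is safe.
 */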
3129c339f538SDawid Gorecki 
3130a52b317eSDawid Gorecki static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
313136278b82SMichal Krawczyk {
3132c339f538SDawid Gorecki 	struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE];
3133a52b317eSDawid Gorecki 	struct ena_ring *tx_ring = (struct ena_ring *)txp;
3134c339f538SDawid Gorecki 	size_t mbuf_cnt = 0;
313536278b82SMichal Krawczyk 	unsigned int total_tx_descs = 0;
3136a52b317eSDawid Gorecki 	unsigned int total_tx_pkts = 0;
3137005064e5SMichal Krawczyk 	uint16_t cleanup_budget;
313836278b82SMichal Krawczyk 	uint16_t next_to_clean = tx_ring->next_to_clean;
3139c339f538SDawid Gorecki 	bool fast_free = tx_ring->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
314036278b82SMichal Krawczyk 
3141a52b317eSDawid Gorecki 	/*
3142a52b317eSDawid Gorecki 	 * If free_pkt_cnt is equal to 0, it means that the user requested
3143a52b317eSDawid Gorecki 	 * full cleanup, so attempt to release all Tx descriptors
3144a52b317eSDawid Gorecki 	 * (ring_size - 1 -> size_mask)
3145a52b317eSDawid Gorecki 	 */
3146a52b317eSDawid Gorecki 	cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt;
314736278b82SMichal Krawczyk 
3148a52b317eSDawid Gorecki 	while (likely(total_tx_pkts < cleanup_budget)) {
314936278b82SMichal Krawczyk 		struct rte_mbuf *mbuf;
315036278b82SMichal Krawczyk 		struct ena_tx_buffer *tx_info;
315136278b82SMichal Krawczyk 		uint16_t req_id;
315236278b82SMichal Krawczyk 
315336278b82SMichal Krawczyk 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
315436278b82SMichal Krawczyk 			break;
315536278b82SMichal Krawczyk 
315636278b82SMichal Krawczyk 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
315736278b82SMichal Krawczyk 			break;
315836278b82SMichal Krawczyk 
315936278b82SMichal Krawczyk 		/* Get Tx info & store how many descs were processed  */
316036278b82SMichal Krawczyk 		tx_info = &tx_ring->tx_buffer_info[req_id];
3161f93e20e5SMichal Krawczyk 		tx_info->timestamp = 0;
316236278b82SMichal Krawczyk 
316336278b82SMichal Krawczyk 		mbuf = tx_info->mbuf;
3164c339f538SDawid Gorecki 		if (fast_free) {
3165c339f538SDawid Gorecki 			mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt,
3166c339f538SDawid Gorecki 				ENA_CLEANUP_BUF_SIZE);
3167c339f538SDawid Gorecki 		} else {
316836278b82SMichal Krawczyk 			rte_pktmbuf_free(mbuf);
3169c339f538SDawid Gorecki 		}
317036278b82SMichal Krawczyk 
317136278b82SMichal Krawczyk 		tx_info->mbuf = NULL;
317236278b82SMichal Krawczyk 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
317336278b82SMichal Krawczyk 
317436278b82SMichal Krawczyk 		total_tx_descs += tx_info->tx_descs;
3175a52b317eSDawid Gorecki 		total_tx_pkts++;
317636278b82SMichal Krawczyk 
317736278b82SMichal Krawczyk 		/* Put back descriptor to the ring for reuse */
317836278b82SMichal Krawczyk 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
317936278b82SMichal Krawczyk 			tx_ring->size_mask);
318036278b82SMichal Krawczyk 	}
318136278b82SMichal Krawczyk 
318236278b82SMichal Krawczyk 	if (likely(total_tx_descs > 0)) {
318336278b82SMichal Krawczyk 		/* acknowledge completion of sent packets */
318436278b82SMichal Krawczyk 		tx_ring->next_to_clean = next_to_clean;
318536278b82SMichal Krawczyk 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
318636278b82SMichal Krawczyk 	}
3187f93e20e5SMichal Krawczyk 
3188c339f538SDawid Gorecki 	if (mbuf_cnt != 0)
3189c339f538SDawid Gorecki 		rte_mempool_put_bulk(mbufs_to_clean[0]->pool,
3190c339f538SDawid Gorecki 			(void **)mbufs_to_clean, mbuf_cnt);
3191c339f538SDawid Gorecki 
3192a52b317eSDawid Gorecki 	/* Record the time of the last full cleanup for the missing Tx completions check */
3193a52b317eSDawid Gorecki 	if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget)
3194f93e20e5SMichal Krawczyk 		tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
3195a52b317eSDawid Gorecki 
3196a52b317eSDawid Gorecki 	return total_tx_pkts;
319736278b82SMichal Krawczyk }
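/*
 * Besides being invoked from the Tx burst path below, this routine also backs
 * the ethdev tx_done_cleanup callback in this driver version, so applications
 * may trigger it explicitly via rte_eth_tx_done_cleanup().
 */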
319836278b82SMichal Krawczyk 
31991173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
32001173fca2SJan Medala 				  uint16_t nb_pkts)
32011173fca2SJan Medala {
32021173fca2SJan Medala 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
3203005064e5SMichal Krawczyk 	int available_desc;
320474456796SMichal Krawczyk 	uint16_t sent_idx = 0;
32051173fca2SJan Medala 
32060a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_TX
32071173fca2SJan Medala 	/* Check adapter state */
32081173fca2SJan Medala 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
32090a001d69SMichal Krawczyk 		PMD_TX_LOG(ALERT,
32101173fca2SJan Medala 			"Trying to xmit pkts while device is NOT running\n");
32111173fca2SJan Medala 		return 0;
32121173fca2SJan Medala 	}
32130a001d69SMichal Krawczyk #endif
32141173fca2SJan Medala 
321567216c31SMichal Krawczyk 	available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq);
321667216c31SMichal Krawczyk 	if (available_desc < tx_ring->tx_free_thresh)
3217a52b317eSDawid Gorecki 		ena_tx_cleanup((void *)tx_ring, 0);
321867216c31SMichal Krawczyk 
32191173fca2SJan Medala 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
322036278b82SMichal Krawczyk 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
32212061fe41SRafal Kozik 			break;
32221d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = true;
322336278b82SMichal Krawczyk 		rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
322436278b82SMichal Krawczyk 			tx_ring->size_mask)]);
32252fca2a98SMichal Krawczyk 	}
32262fca2a98SMichal Krawczyk 
32275e02e19eSJan Medala 	/* If there are ready packets to be xmitted... */
32281d973d8fSIgor Chauskin 	if (likely(tx_ring->pkts_without_db)) {
32295e02e19eSJan Medala 		/* ...let HW do its best :-) */
32301173fca2SJan Medala 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
323145b6d861SMichal Krawczyk 		tx_ring->tx_stats.doorbells++;
32321d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = false;
32335e02e19eSJan Medala 	}
32345e02e19eSJan Medala 
32357830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
3236b2b02edeSMichal Krawczyk 		ena_com_free_q_entries(tx_ring->ena_com_io_sq);
32377830e905SSolganik Alexander 	tx_ring->tx_stats.tx_poll++;
32387830e905SSolganik Alexander 
32391173fca2SJan Medala 	return sent_idx;
32401173fca2SJan Medala }
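/*
 * Application-side usage sketch (illustrative, not part of the PMD; the
 * helper name is hypothetical): keep calling the generic burst API, which
 * lands in eth_ena_xmit_pkts() above, until the whole burst is accepted.
 */
static inline void
ena_example_xmit_all(uint16_t port_id, uint16_t queue_id,
		     struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id, &pkts[sent],
					 nb_pkts - sent);
}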
32411173fca2SJan Medala 
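/*
 * Copy driver metrics from the device: the customer metrics admin command is
 * preferred when the device advertises ENA_ADMIN_CUSTOMER_METRICS, with the
 * legacy ENI stats used as a fallback. Admin queue access is serialized with
 * admin_lock.
 */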
324292401abfSShai Brandes static void ena_copy_customer_metrics(struct ena_adapter *adapter, uint64_t *buf,
324392401abfSShai Brandes 					     size_t num_metrics)
324445718adaSMichal Krawczyk {
324592401abfSShai Brandes 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
324645718adaSMichal Krawczyk 	int rc;
324745718adaSMichal Krawczyk 
324892401abfSShai Brandes 	if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
324992401abfSShai Brandes 		if (num_metrics != ENA_STATS_ARRAY_METRICS) {
325092401abfSShai Brandes 			PMD_DRV_LOG(ERR, "Detected discrepancy in the number of customer metrics\n");
325192401abfSShai Brandes 			return;
325292401abfSShai Brandes 		}
325345718adaSMichal Krawczyk 		rte_spinlock_lock(&adapter->admin_lock);
325492401abfSShai Brandes 		rc = ENA_PROXY(adapter,
325592401abfSShai Brandes 					ena_com_get_customer_metrics,
325692401abfSShai Brandes 					&adapter->ena_dev,
325792401abfSShai Brandes 					(char *)buf,
325892401abfSShai Brandes 					num_metrics * sizeof(uint64_t));
325945718adaSMichal Krawczyk 		rte_spinlock_unlock(&adapter->admin_lock);
326045718adaSMichal Krawczyk 		if (rc != 0) {
326192401abfSShai Brandes 			PMD_DRV_LOG(WARNING, "Failed to get customer metrics, rc: %d\n", rc);
326292401abfSShai Brandes 			return;
326345718adaSMichal Krawczyk 		}
326445718adaSMichal Krawczyk 
326592401abfSShai Brandes 	} else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
326692401abfSShai Brandes 		if (num_metrics != ENA_STATS_ARRAY_METRICS_LEGACY) {
326792401abfSShai Brandes 			PMD_DRV_LOG(ERR, "Detected discrepancy in the number of legacy metrics\n");
326892401abfSShai Brandes 			return;
326992401abfSShai Brandes 		}
327092401abfSShai Brandes 
327192401abfSShai Brandes 		rte_spinlock_lock(&adapter->admin_lock);
327292401abfSShai Brandes 		rc = ENA_PROXY(adapter,
327392401abfSShai Brandes 			       ena_com_get_eni_stats,
327492401abfSShai Brandes 			       &adapter->ena_dev,
327592401abfSShai Brandes 			       (struct ena_admin_eni_stats *)buf);
327692401abfSShai Brandes 		rte_spinlock_unlock(&adapter->admin_lock);
327792401abfSShai Brandes 		if (rc != 0) {
327892401abfSShai Brandes 			PMD_DRV_LOG(WARNING,
327992401abfSShai Brandes 				"Failed to get ENI metrics, rc: %d\n", rc);
328092401abfSShai Brandes 			return;
328192401abfSShai Brandes 		}
328292401abfSShai Brandes 	}
328345718adaSMichal Krawczyk }
328445718adaSMichal Krawczyk 
3285a73dd098SShai Brandes static void ena_copy_ena_srd_info(struct ena_adapter *adapter,
3286a73dd098SShai Brandes 		struct ena_stats_srd *srd_info)
3287a73dd098SShai Brandes {
3288a73dd098SShai Brandes 	int rc;
3289a73dd098SShai Brandes 
3290a73dd098SShai Brandes 	if (!ena_com_get_cap(&adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO))
3291a73dd098SShai Brandes 		return;
3292a73dd098SShai Brandes 
3293a73dd098SShai Brandes 	rte_spinlock_lock(&adapter->admin_lock);
3294a73dd098SShai Brandes 	rc = ENA_PROXY(adapter,
3295a73dd098SShai Brandes 		       ena_com_get_ena_srd_info,
3296a73dd098SShai Brandes 		       &adapter->ena_dev,
3297a73dd098SShai Brandes 		       (struct ena_admin_ena_srd_info *)srd_info);
3298a73dd098SShai Brandes 	rte_spinlock_unlock(&adapter->admin_lock);
3299a73dd098SShai Brandes 	if (rc != ENA_COM_OK && rc != ENA_COM_UNSUPPORTED) {
3300a73dd098SShai Brandes 		PMD_DRV_LOG(WARNING,
3301a73dd098SShai Brandes 				"Failed to get ENA express srd info, rc: %d\n", rc);
3302a73dd098SShai Brandes 		return;
3303a73dd098SShai Brandes 	}
3304a73dd098SShai Brandes }
3305a73dd098SShai Brandes 
33067830e905SSolganik Alexander /**
33077830e905SSolganik Alexander  * DPDK callback to retrieve names of extended device statistics
33087830e905SSolganik Alexander  *
33097830e905SSolganik Alexander  * @param dev
33107830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
33117830e905SSolganik Alexander  * @param[out] xstats_names
33127830e905SSolganik Alexander  *   Buffer to insert names into.
33137830e905SSolganik Alexander  * @param n
33147830e905SSolganik Alexander  *   Number of names.
33157830e905SSolganik Alexander  *
33167830e905SSolganik Alexander  * @return
33177830e905SSolganik Alexander  *   Number of xstats names.
33187830e905SSolganik Alexander  */
33197830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
33207830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
33217830e905SSolganik Alexander 				unsigned int n)
33227830e905SSolganik Alexander {
332392401abfSShai Brandes 	struct ena_adapter *adapter = dev->data->dev_private;
3324aab58857SStanislaw Kardach 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
33257830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
33267830e905SSolganik Alexander 
33277830e905SSolganik Alexander 	if (n < xstats_count || !xstats_names)
33287830e905SSolganik Alexander 		return xstats_count;
33297830e905SSolganik Alexander 
33307830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
33317830e905SSolganik Alexander 		strcpy(xstats_names[count].name,
33327830e905SSolganik Alexander 			ena_stats_global_strings[stat].name);
33337830e905SSolganik Alexander 
333492401abfSShai Brandes 	for (stat = 0; stat < adapter->metrics_num; stat++, count++)
333592401abfSShai Brandes 		rte_strscpy(xstats_names[count].name,
333692401abfSShai Brandes 			    ena_stats_metrics_strings[stat].name,
333792401abfSShai Brandes 			    RTE_ETH_XSTATS_NAME_SIZE);
3338a73dd098SShai Brandes 	for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++)
3339a73dd098SShai Brandes 		rte_strscpy(xstats_names[count].name,
3340a73dd098SShai Brandes 			    ena_stats_srd_strings[stat].name,
3341a73dd098SShai Brandes 			    RTE_ETH_XSTATS_NAME_SIZE);
334245718adaSMichal Krawczyk 
33437830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
33447830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
33457830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
33467830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
33477830e905SSolganik Alexander 				"rx_q%d_%s", i,
33487830e905SSolganik Alexander 				ena_stats_rx_strings[stat].name);
33497830e905SSolganik Alexander 
33507830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
33517830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
33527830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
33537830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
33547830e905SSolganik Alexander 				"tx_q%d_%s", i,
33557830e905SSolganik Alexander 				ena_stats_tx_strings[stat].name);
33567830e905SSolganik Alexander 
33577830e905SSolganik Alexander 	return xstats_count;
33587830e905SSolganik Alexander }
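/*
 * The flat xstat id space produced above (and decoded by the by-id callbacks
 * below) is laid out as:
 *   [global stats][customer metrics][ENA SRD stats]
 *   [Rx stats, stat-major: stat * nb_rx_queues + queue]
 *   [Tx stats, stat-major: stat * nb_tx_queues + queue]
 */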
33597830e905SSolganik Alexander 
33607830e905SSolganik Alexander /**
33613cec73faSMichal Krawczyk  * DPDK callback to retrieve names of extended device statistics for the given
33623cec73faSMichal Krawczyk  * ids.
33633cec73faSMichal Krawczyk  *
33643cec73faSMichal Krawczyk  * @param dev
33653cec73faSMichal Krawczyk  *   Pointer to Ethernet device structure.
33663cec73faSMichal Krawczyk  * @param[out] xstats_names
33673cec73faSMichal Krawczyk  *   Buffer to insert names into.
33683cec73faSMichal Krawczyk  * @param ids
33693cec73faSMichal Krawczyk  *   IDs array for which the names should be retrieved.
33703cec73faSMichal Krawczyk  * @param size
33713cec73faSMichal Krawczyk  *   Number of ids.
33723cec73faSMichal Krawczyk  *
33733cec73faSMichal Krawczyk  * @return
33743cec73faSMichal Krawczyk  *   Positive value: number of xstats names. Negative value: error code.
33753cec73faSMichal Krawczyk  */
33763cec73faSMichal Krawczyk static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
33773cec73faSMichal Krawczyk 				      const uint64_t *ids,
33783cec73faSMichal Krawczyk 				      struct rte_eth_xstat_name *xstats_names,
33793cec73faSMichal Krawczyk 				      unsigned int size)
33803cec73faSMichal Krawczyk {
338192401abfSShai Brandes 	struct ena_adapter *adapter = dev->data->dev_private;
33823cec73faSMichal Krawczyk 	uint64_t xstats_count = ena_xstats_calc_num(dev->data);
33833cec73faSMichal Krawczyk 	uint64_t id, qid;
33843cec73faSMichal Krawczyk 	unsigned int i;
33853cec73faSMichal Krawczyk 
33863cec73faSMichal Krawczyk 	if (xstats_names == NULL)
33873cec73faSMichal Krawczyk 		return xstats_count;
33883cec73faSMichal Krawczyk 
33893cec73faSMichal Krawczyk 	for (i = 0; i < size; ++i) {
33903cec73faSMichal Krawczyk 		id = ids[i];
33913cec73faSMichal Krawczyk 		if (id >= xstats_count) {
33923cec73faSMichal Krawczyk 			PMD_DRV_LOG(ERR,
33933cec73faSMichal Krawczyk 				"ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
33943cec73faSMichal Krawczyk 				 id, xstats_count);
33953cec73faSMichal Krawczyk 			return -EINVAL;
33963cec73faSMichal Krawczyk 		}
33973cec73faSMichal Krawczyk 
33983cec73faSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_GLOBAL) {
33993cec73faSMichal Krawczyk 			strcpy(xstats_names[i].name,
34003cec73faSMichal Krawczyk 			       ena_stats_global_strings[id].name);
34013cec73faSMichal Krawczyk 			continue;
34023cec73faSMichal Krawczyk 		}
34033cec73faSMichal Krawczyk 
34043cec73faSMichal Krawczyk 		id -= ENA_STATS_ARRAY_GLOBAL;
340592401abfSShai Brandes 		if (id < adapter->metrics_num) {
340692401abfSShai Brandes 			rte_strscpy(xstats_names[i].name,
340792401abfSShai Brandes 				    ena_stats_metrics_strings[id].name,
340892401abfSShai Brandes 				    RTE_ETH_XSTATS_NAME_SIZE);
34093cec73faSMichal Krawczyk 			continue;
34103cec73faSMichal Krawczyk 		}
34113cec73faSMichal Krawczyk 
341292401abfSShai Brandes 		id -= adapter->metrics_num;
3413a73dd098SShai Brandes 
3414a73dd098SShai Brandes 		if (id < ENA_STATS_ARRAY_ENA_SRD) {
3415a73dd098SShai Brandes 			rte_strscpy(xstats_names[i].name,
3416a73dd098SShai Brandes 				    ena_stats_srd_strings[id].name,
3417a73dd098SShai Brandes 				    RTE_ETH_XSTATS_NAME_SIZE);
3418a73dd098SShai Brandes 			continue;
3419a73dd098SShai Brandes 		}
3420a73dd098SShai Brandes 		id -= ENA_STATS_ARRAY_ENA_SRD;
3421a73dd098SShai Brandes 
34223cec73faSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues) {
34233cec73faSMichal Krawczyk 			qid = id % dev->data->nb_rx_queues;
34243cec73faSMichal Krawczyk 			id /= dev->data->nb_rx_queues;
34253cec73faSMichal Krawczyk 			snprintf(xstats_names[i].name,
34263cec73faSMichal Krawczyk 				 sizeof(xstats_names[i].name),
34273cec73faSMichal Krawczyk 				 "rx_q%" PRIu64 "_%s",
34283cec73faSMichal Krawczyk 				 qid, ena_stats_rx_strings[id].name);
34293cec73faSMichal Krawczyk 			continue;
34303cec73faSMichal Krawczyk 		}
34313cec73faSMichal Krawczyk 
34323cec73faSMichal Krawczyk 		id -= ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
34333cec73faSMichal Krawczyk 		/* Although this condition is not strictly needed, it was added
34343cec73faSMichal Krawczyk 		 * for compatibility in case a new xstat structure is ever added.
34353cec73faSMichal Krawczyk 		 */
34363cec73faSMichal Krawczyk 		if (id < ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues) {
34373cec73faSMichal Krawczyk 			qid = id % dev->data->nb_tx_queues;
34383cec73faSMichal Krawczyk 			id /= dev->data->nb_tx_queues;
34393cec73faSMichal Krawczyk 			snprintf(xstats_names[i].name,
34403cec73faSMichal Krawczyk 				 sizeof(xstats_names[i].name),
34413cec73faSMichal Krawczyk 				 "tx_q%" PRIu64 "_%s",
34423cec73faSMichal Krawczyk 				 qid, ena_stats_tx_strings[id].name);
34433cec73faSMichal Krawczyk 			continue;
34443cec73faSMichal Krawczyk 		}
34453cec73faSMichal Krawczyk 	}
34463cec73faSMichal Krawczyk 
34473cec73faSMichal Krawczyk 	return i;
34483cec73faSMichal Krawczyk }
34493cec73faSMichal Krawczyk 
34503cec73faSMichal Krawczyk /**
34517830e905SSolganik Alexander  * DPDK callback to get extended device statistics.
34527830e905SSolganik Alexander  *
34537830e905SSolganik Alexander  * @param dev
34547830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
34557830e905SSolganik Alexander  * @param[out] xstats
34567830e905SSolganik Alexander  *   Stats table output buffer.
34577830e905SSolganik Alexander  * @param n
34587830e905SSolganik Alexander  *   The size of the stats table.
34597830e905SSolganik Alexander  *
34607830e905SSolganik Alexander  * @return
34617830e905SSolganik Alexander  *   Number of xstats on success, negative on failure.
34627830e905SSolganik Alexander  */
34637830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
34647830e905SSolganik Alexander 			  struct rte_eth_xstat *xstats,
34657830e905SSolganik Alexander 			  unsigned int n)
34667830e905SSolganik Alexander {
3467890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
3468aab58857SStanislaw Kardach 	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
34697830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
34707830e905SSolganik Alexander 	int stat_offset;
34717830e905SSolganik Alexander 	void *stats_begin;
347292401abfSShai Brandes 	uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS];
3473a73dd098SShai Brandes 	struct ena_stats_srd srd_info = {0};
34747830e905SSolganik Alexander 
34757830e905SSolganik Alexander 	if (n < xstats_count)
34767830e905SSolganik Alexander 		return xstats_count;
34777830e905SSolganik Alexander 
34787830e905SSolganik Alexander 	if (!xstats)
34797830e905SSolganik Alexander 		return 0;
34807830e905SSolganik Alexander 
34817830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
3482493107fdSMichal Krawczyk 		stat_offset = ena_stats_global_strings[stat].stat_offset;
34837830e905SSolganik Alexander 		stats_begin = &adapter->dev_stats;
34847830e905SSolganik Alexander 
34857830e905SSolganik Alexander 		xstats[count].id = count;
34867830e905SSolganik Alexander 		xstats[count].value = *((uint64_t *)
34877830e905SSolganik Alexander 			((char *)stats_begin + stat_offset));
34887830e905SSolganik Alexander 	}
34897830e905SSolganik Alexander 
349092401abfSShai Brandes 	ena_copy_customer_metrics(adapter, metrics_stats, adapter->metrics_num);
349192401abfSShai Brandes 	stats_begin = metrics_stats;
349292401abfSShai Brandes 	for (stat = 0; stat < adapter->metrics_num; stat++, count++) {
349392401abfSShai Brandes 		stat_offset = ena_stats_metrics_strings[stat].stat_offset;
349445718adaSMichal Krawczyk 
349545718adaSMichal Krawczyk 		xstats[count].id = count;
349645718adaSMichal Krawczyk 		xstats[count].value = *((uint64_t *)
349745718adaSMichal Krawczyk 		    ((char *)stats_begin + stat_offset));
349845718adaSMichal Krawczyk 	}
349945718adaSMichal Krawczyk 
3500a73dd098SShai Brandes 	ena_copy_ena_srd_info(adapter, &srd_info);
3501a73dd098SShai Brandes 	stats_begin = &srd_info;
3502a73dd098SShai Brandes 	for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++) {
3503a73dd098SShai Brandes 		stat_offset = ena_stats_srd_strings[stat].stat_offset;
3504a73dd098SShai Brandes 		xstats[count].id = count;
3505a73dd098SShai Brandes 		xstats[count].value = *((uint64_t *)
3506a73dd098SShai Brandes 		    ((char *)stats_begin + stat_offset));
3507a73dd098SShai Brandes 	}
3508a73dd098SShai Brandes 
35097830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
35107830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
35117830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
35127830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
35137830e905SSolganik Alexander 
35147830e905SSolganik Alexander 			xstats[count].id = count;
35157830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
35167830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
35177830e905SSolganik Alexander 		}
35187830e905SSolganik Alexander 	}
35197830e905SSolganik Alexander 
35207830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
35217830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
35227830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
35237830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
35247830e905SSolganik Alexander 
35257830e905SSolganik Alexander 			xstats[count].id = count;
35267830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
35277830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
35287830e905SSolganik Alexander 		}
35297830e905SSolganik Alexander 	}
35307830e905SSolganik Alexander 
35317830e905SSolganik Alexander 	return count;
35327830e905SSolganik Alexander }
35337830e905SSolganik Alexander 
35347830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
35357830e905SSolganik Alexander 				const uint64_t *ids,
35367830e905SSolganik Alexander 				uint64_t *values,
35377830e905SSolganik Alexander 				unsigned int n)
35387830e905SSolganik Alexander {
3539890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
35407830e905SSolganik Alexander 	uint64_t id;
35417830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
35427830e905SSolganik Alexander 	unsigned int i;
35437830e905SSolganik Alexander 	int qid;
35447830e905SSolganik Alexander 	int valid = 0;
354592401abfSShai Brandes 	bool were_metrics_copied = false;
3546a73dd098SShai Brandes 	bool was_srd_info_copied = false;
354792401abfSShai Brandes 	uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS];
3548a73dd098SShai Brandes 	struct ena_stats_srd srd_info = {0};
354945718adaSMichal Krawczyk 
35507830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
35517830e905SSolganik Alexander 		id = ids[i];
35527830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
35537830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
35547830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
35557830e905SSolganik Alexander 			++valid;
35567830e905SSolganik Alexander 			continue;
35577830e905SSolganik Alexander 		}
35587830e905SSolganik Alexander 
355945718adaSMichal Krawczyk 		/* Check if id belongs to customer metrics statistics */
35607830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
356192401abfSShai Brandes 		if (id < adapter->metrics_num) {
356292401abfSShai Brandes 			/* Avoid reading metrics multiple times in a single
356345718adaSMichal Krawczyk 			 * function call, as it requires communication with the
356445718adaSMichal Krawczyk 			 * admin queue.
356545718adaSMichal Krawczyk 			 */
356692401abfSShai Brandes 			if (!were_metrics_copied) {
356792401abfSShai Brandes 				were_metrics_copied = true;
356892401abfSShai Brandes 				ena_copy_customer_metrics(adapter,
356992401abfSShai Brandes 						metrics_stats,
357092401abfSShai Brandes 						adapter->metrics_num);
357145718adaSMichal Krawczyk 			}
357292401abfSShai Brandes 
357392401abfSShai Brandes 			values[i] = *((uint64_t *)&metrics_stats + id);
357445718adaSMichal Krawczyk 			++valid;
357545718adaSMichal Krawczyk 			continue;
357645718adaSMichal Krawczyk 		}
357745718adaSMichal Krawczyk 
3578a73dd098SShai Brandes 		/* Check if id belongs to SRD info statistics */
357992401abfSShai Brandes 		id -= adapter->metrics_num;
3580a73dd098SShai Brandes 
3581a73dd098SShai Brandes 		if (id < ENA_STATS_ARRAY_ENA_SRD) {
3582a73dd098SShai Brandes 			/*
3583a73dd098SShai Brandes 			 * Avoid reading srd info multiple times in a single
3584a73dd098SShai Brandes 			 * function call, as it requires communication with the
3585a73dd098SShai Brandes 			 * admin queue.
3586a73dd098SShai Brandes 			 */
3587a73dd098SShai Brandes 			if (!was_srd_info_copied) {
3588a73dd098SShai Brandes 				was_srd_info_copied = true;
3589a73dd098SShai Brandes 				ena_copy_ena_srd_info(adapter, &srd_info);
3590a73dd098SShai Brandes 			}
3591a73dd098SShai Brandes 			values[i] = *((uint64_t *)&srd_info + id);
3592a73dd098SShai Brandes 			++valid;
3593a73dd098SShai Brandes 			continue;
3594a73dd098SShai Brandes 		}
3595a73dd098SShai Brandes 
3596a73dd098SShai Brandes 		/* Check if id belongs to rx queue statistics */
3597a73dd098SShai Brandes 		id -= ENA_STATS_ARRAY_ENA_SRD;
3598a73dd098SShai Brandes 
35997830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
36007830e905SSolganik Alexander 		if (id < rx_entries) {
36017830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
36027830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
36037830e905SSolganik Alexander 			values[i] = *((uint64_t *)
36047830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
36057830e905SSolganik Alexander 			++valid;
36067830e905SSolganik Alexander 			continue;
36077830e905SSolganik Alexander 		}
36087830e905SSolganik Alexander 		/* Check if id belongs to tx queue statistics */
36097830e905SSolganik Alexander 		id -= rx_entries;
36107830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
36117830e905SSolganik Alexander 		if (id < tx_entries) {
36127830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
36137830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
36147830e905SSolganik Alexander 			values[i] = *((uint64_t *)
36157830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
36167830e905SSolganik Alexander 			++valid;
36177830e905SSolganik Alexander 			continue;
36187830e905SSolganik Alexander 		}
36197830e905SSolganik Alexander 	}
36207830e905SSolganik Alexander 
36217830e905SSolganik Alexander 	return valid;
36227830e905SSolganik Alexander }
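/*
 * Application-side sketch (illustrative, the helper name is hypothetical):
 * resolve an xstat name to its flat id once, then poll its value cheaply
 * through the by-id path implemented above.
 */
static inline int
ena_example_read_xstat(uint16_t port_id, uint64_t id, uint64_t *value)
{
	/* A return value of 1 means the single requested id was resolved. */
	return rte_eth_xstats_get_by_id(port_id, &id, value, 1) == 1 ? 0 : -1;
}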
36237830e905SSolganik Alexander 
3624cc0c5d25SMichal Krawczyk static int ena_process_uint_devarg(const char *key,
3625cc0c5d25SMichal Krawczyk 				  const char *value,
3626cc0c5d25SMichal Krawczyk 				  void *opaque)
3627cc0c5d25SMichal Krawczyk {
3628cc0c5d25SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
3629cc0c5d25SMichal Krawczyk 	char *str_end;
3630cc0c5d25SMichal Krawczyk 	uint64_t uint_value;
3631cc0c5d25SMichal Krawczyk 
3632cc0c5d25SMichal Krawczyk 	uint_value = strtoull(value, &str_end, 10);
3633cc0c5d25SMichal Krawczyk 	if (value == str_end) {
3634cc0c5d25SMichal Krawczyk 		PMD_INIT_LOG(ERR,
3635cc0c5d25SMichal Krawczyk 			"Invalid value for key '%s'. Only uint values are accepted.\n",
3636cc0c5d25SMichal Krawczyk 			key);
3637cc0c5d25SMichal Krawczyk 		return -EINVAL;
3638cc0c5d25SMichal Krawczyk 	}
3639cc0c5d25SMichal Krawczyk 
3640cc0c5d25SMichal Krawczyk 	if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
3641cc0c5d25SMichal Krawczyk 		if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
3642cc0c5d25SMichal Krawczyk 			PMD_INIT_LOG(ERR,
3643cc0c5d25SMichal Krawczyk 				"Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n",
3644cc0c5d25SMichal Krawczyk 				uint_value, ENA_MAX_TX_TIMEOUT_SECONDS);
3645cc0c5d25SMichal Krawczyk 			return -EINVAL;
3646cc0c5d25SMichal Krawczyk 		} else if (uint_value == 0) {
3647cc0c5d25SMichal Krawczyk 			PMD_INIT_LOG(INFO,
3648cc0c5d25SMichal Krawczyk 				"Check for missing Tx completions has been disabled.\n");
3649cc0c5d25SMichal Krawczyk 			adapter->missing_tx_completion_to =
3650cc0c5d25SMichal Krawczyk 				ENA_HW_HINTS_NO_TIMEOUT;
3651cc0c5d25SMichal Krawczyk 		} else {
3652cc0c5d25SMichal Krawczyk 			PMD_INIT_LOG(INFO,
3653cc0c5d25SMichal Krawczyk 				"Tx packet completion timeout set to %" PRIu64 " seconds.\n",
3654cc0c5d25SMichal Krawczyk 				uint_value);
3655cc0c5d25SMichal Krawczyk 			adapter->missing_tx_completion_to =
3656cc0c5d25SMichal Krawczyk 				uint_value * rte_get_timer_hz();
3657cc0c5d25SMichal Krawczyk 		}
3658cc0c5d25SMichal Krawczyk 	}
3659cc0c5d25SMichal Krawczyk 
3660cc0c5d25SMichal Krawczyk 	return 0;
3661cc0c5d25SMichal Krawczyk }
3662cc0c5d25SMichal Krawczyk 
36638a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key,
36648a7a73f2SMichal Krawczyk 				   const char *value,
36658a7a73f2SMichal Krawczyk 				   void *opaque)
36668a7a73f2SMichal Krawczyk {
36678a7a73f2SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
36688a7a73f2SMichal Krawczyk 	bool bool_value;
36698a7a73f2SMichal Krawczyk 
36708a7a73f2SMichal Krawczyk 	/* Parse the value. */
36718a7a73f2SMichal Krawczyk 	if (strcmp(value, "1") == 0) {
36728a7a73f2SMichal Krawczyk 		bool_value = true;
36738a7a73f2SMichal Krawczyk 	} else if (strcmp(value, "0") == 0) {
36748a7a73f2SMichal Krawczyk 		bool_value = false;
36758a7a73f2SMichal Krawczyk 	} else {
36768a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR,
36778a7a73f2SMichal Krawczyk 			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
36788a7a73f2SMichal Krawczyk 			value, key);
36798a7a73f2SMichal Krawczyk 		return -EINVAL;
36808a7a73f2SMichal Krawczyk 	}
36818a7a73f2SMichal Krawczyk 
36828a7a73f2SMichal Krawczyk 	/* Now, assign it to the proper adapter field. */
36839b312ad3SIgor Chauskin 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
36848a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr = bool_value;
36859944919eSMichal Krawczyk 	else if (strcmp(key, ENA_DEVARG_ENABLE_LLQ) == 0)
36869944919eSMichal Krawczyk 		adapter->enable_llq = bool_value;
36878a7a73f2SMichal Krawczyk 
36888a7a73f2SMichal Krawczyk 	return 0;
36898a7a73f2SMichal Krawczyk }
36908a7a73f2SMichal Krawczyk 
36918a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter,
36928a7a73f2SMichal Krawczyk 			     struct rte_devargs *devargs)
36938a7a73f2SMichal Krawczyk {
36948a7a73f2SMichal Krawczyk 	static const char * const allowed_args[] = {
36958a7a73f2SMichal Krawczyk 		ENA_DEVARG_LARGE_LLQ_HDR,
3696cc0c5d25SMichal Krawczyk 		ENA_DEVARG_MISS_TXC_TO,
36979944919eSMichal Krawczyk 		ENA_DEVARG_ENABLE_LLQ,
36989f220a95SMichal Krawczyk 		NULL,
36998a7a73f2SMichal Krawczyk 	};
37008a7a73f2SMichal Krawczyk 	struct rte_kvargs *kvlist;
37018a7a73f2SMichal Krawczyk 	int rc;
37028a7a73f2SMichal Krawczyk 
37038a7a73f2SMichal Krawczyk 	if (devargs == NULL)
37048a7a73f2SMichal Krawczyk 		return 0;
37058a7a73f2SMichal Krawczyk 
37068a7a73f2SMichal Krawczyk 	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
37078a7a73f2SMichal Krawczyk 	if (kvlist == NULL) {
37088a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
37098a7a73f2SMichal Krawczyk 			devargs->args);
37108a7a73f2SMichal Krawczyk 		return -EINVAL;
37118a7a73f2SMichal Krawczyk 	}
37128a7a73f2SMichal Krawczyk 
37138a7a73f2SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
37148a7a73f2SMichal Krawczyk 		ena_process_bool_devarg, adapter);
3715cc0c5d25SMichal Krawczyk 	if (rc != 0)
3716cc0c5d25SMichal Krawczyk 		goto exit;
3717cc0c5d25SMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO,
3718cc0c5d25SMichal Krawczyk 		ena_process_uint_devarg, adapter);
37199944919eSMichal Krawczyk 	if (rc != 0)
37209944919eSMichal Krawczyk 		goto exit;
37219944919eSMichal Krawczyk 	rc = rte_kvargs_process(kvlist, ENA_DEVARG_ENABLE_LLQ,
37229944919eSMichal Krawczyk 		ena_process_bool_devarg, adapter);
37238a7a73f2SMichal Krawczyk 
3724cc0c5d25SMichal Krawczyk exit:
37258a7a73f2SMichal Krawczyk 	rte_kvargs_free(kvlist);
37268a7a73f2SMichal Krawczyk 
37278a7a73f2SMichal Krawczyk 	return rc;
37288a7a73f2SMichal Krawczyk }
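/*
 * Example (illustrative): assuming the devarg key strings match the macro
 * names registered at the bottom of this file, the arguments parsed above
 * can be passed with the PCI allowlist entry on the EAL command line, e.g.:
 *   --allow 00:06.0,large_llq_hdr=1,miss_txc_to=5,enable_llq=1
 */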
37298a7a73f2SMichal Krawczyk 
37306986cdc4SMichal Krawczyk static int ena_setup_rx_intr(struct rte_eth_dev *dev)
37316986cdc4SMichal Krawczyk {
37326986cdc4SMichal Krawczyk 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3733d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
37346986cdc4SMichal Krawczyk 	int rc;
37356986cdc4SMichal Krawczyk 	uint16_t vectors_nb, i;
37366986cdc4SMichal Krawczyk 	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
37376986cdc4SMichal Krawczyk 
37386986cdc4SMichal Krawczyk 	if (!rx_intr_requested)
37396986cdc4SMichal Krawczyk 		return 0;
37406986cdc4SMichal Krawczyk 
37416986cdc4SMichal Krawczyk 	if (!rte_intr_cap_multiple(intr_handle)) {
37426986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
37436986cdc4SMichal Krawczyk 			"Rx interrupt requested, but it isn't supported by the PCI driver\n");
37446986cdc4SMichal Krawczyk 		return -ENOTSUP;
37456986cdc4SMichal Krawczyk 	}
37466986cdc4SMichal Krawczyk 
37476986cdc4SMichal Krawczyk 	/* Disable interrupt mapping before the configuration starts. */
37486986cdc4SMichal Krawczyk 	rte_intr_disable(intr_handle);
37496986cdc4SMichal Krawczyk 
37506986cdc4SMichal Krawczyk 	/* Verify if there are enough vectors available. */
37516986cdc4SMichal Krawczyk 	vectors_nb = dev->data->nb_rx_queues;
37526986cdc4SMichal Krawczyk 	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
37536986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
37546986cdc4SMichal Krawczyk 			"Too many Rx interrupts requested, maximum number: %d\n",
37556986cdc4SMichal Krawczyk 			RTE_MAX_RXTX_INTR_VEC_ID);
37566986cdc4SMichal Krawczyk 		rc = -ENOTSUP;
37576986cdc4SMichal Krawczyk 		goto enable_intr;
37586986cdc4SMichal Krawczyk 	}
37596986cdc4SMichal Krawczyk 
3760d61138d4SHarman Kalra 	/* Allocate the vector list */
3761d61138d4SHarman Kalra 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
3762d61138d4SHarman Kalra 					   dev->data->nb_rx_queues)) {
37636986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
37646986cdc4SMichal Krawczyk 			"Failed to allocate interrupt vector for %d queues\n",
37656986cdc4SMichal Krawczyk 			dev->data->nb_rx_queues);
37666986cdc4SMichal Krawczyk 		rc = -ENOMEM;
37676986cdc4SMichal Krawczyk 		goto enable_intr;
37686986cdc4SMichal Krawczyk 	}
37696986cdc4SMichal Krawczyk 
37706986cdc4SMichal Krawczyk 	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
37716986cdc4SMichal Krawczyk 	if (rc != 0)
37726986cdc4SMichal Krawczyk 		goto free_intr_vec;
37736986cdc4SMichal Krawczyk 
37746986cdc4SMichal Krawczyk 	if (!rte_intr_allow_others(intr_handle)) {
37756986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
37766986cdc4SMichal Krawczyk 			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
37776986cdc4SMichal Krawczyk 		goto disable_intr_efd;
37786986cdc4SMichal Krawczyk 	}
37796986cdc4SMichal Krawczyk 
37806986cdc4SMichal Krawczyk 	for (i = 0; i < vectors_nb; ++i)
3781d61138d4SHarman Kalra 		if (rte_intr_vec_list_index_set(intr_handle, i,
3782d61138d4SHarman Kalra 					   RTE_INTR_VEC_RXTX_OFFSET + i))
3783d61138d4SHarman Kalra 			goto disable_intr_efd;
37846986cdc4SMichal Krawczyk 
37856986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
37866986cdc4SMichal Krawczyk 	return 0;
37876986cdc4SMichal Krawczyk 
37886986cdc4SMichal Krawczyk disable_intr_efd:
37896986cdc4SMichal Krawczyk 	rte_intr_efd_disable(intr_handle);
37906986cdc4SMichal Krawczyk free_intr_vec:
3791d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
37926986cdc4SMichal Krawczyk enable_intr:
37936986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
37946986cdc4SMichal Krawczyk 	return rc;
37956986cdc4SMichal Krawczyk }
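/*
 * Application-side sketch (illustrative, the helper name is hypothetical):
 * sleeping until an Rx interrupt fires on a queue. Assumes
 * dev_conf.intr_conf.rxq was set before rte_eth_dev_configure(), so the
 * vector setup above took place; the unmasking itself is handled by
 * ena_rx_queue_intr_enable() below.
 */
static inline void
ena_example_rx_wait(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;

	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* Block until the MSI-X vector assigned to this queue triggers. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}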
37966986cdc4SMichal Krawczyk 
37976986cdc4SMichal Krawczyk static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
37986986cdc4SMichal Krawczyk 				 uint16_t queue_id,
37996986cdc4SMichal Krawczyk 				 bool unmask)
38006986cdc4SMichal Krawczyk {
38016986cdc4SMichal Krawczyk 	struct ena_adapter *adapter = dev->data->dev_private;
38026986cdc4SMichal Krawczyk 	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
38036986cdc4SMichal Krawczyk 	struct ena_eth_io_intr_reg intr_reg;
38046986cdc4SMichal Krawczyk 
3805f73f53f7SShai Brandes 	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask, 1);
38066986cdc4SMichal Krawczyk 	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
38076986cdc4SMichal Krawczyk }
38086986cdc4SMichal Krawczyk 
38096986cdc4SMichal Krawczyk static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
38106986cdc4SMichal Krawczyk 				    uint16_t queue_id)
38116986cdc4SMichal Krawczyk {
38126986cdc4SMichal Krawczyk 	ena_rx_queue_intr_set(dev, queue_id, true);
38136986cdc4SMichal Krawczyk 
38146986cdc4SMichal Krawczyk 	return 0;
38156986cdc4SMichal Krawczyk }
38166986cdc4SMichal Krawczyk 
38176986cdc4SMichal Krawczyk static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
38186986cdc4SMichal Krawczyk 				     uint16_t queue_id)
38196986cdc4SMichal Krawczyk {
38206986cdc4SMichal Krawczyk 	ena_rx_queue_intr_set(dev, queue_id, false);
38216986cdc4SMichal Krawczyk 
38226986cdc4SMichal Krawczyk 	return 0;
38236986cdc4SMichal Krawczyk }
38246986cdc4SMichal Krawczyk 
3825b9b05d6fSMichal Krawczyk static int ena_configure_aenq(struct ena_adapter *adapter)
3826b9b05d6fSMichal Krawczyk {
3827b9b05d6fSMichal Krawczyk 	uint32_t aenq_groups = adapter->all_aenq_groups;
3828b9b05d6fSMichal Krawczyk 	int rc;
3829b9b05d6fSMichal Krawczyk 
3830b9b05d6fSMichal Krawczyk 	/* all_aenq_groups holds all AENQ functions supported by the device and
3831b9b05d6fSMichal Krawczyk 	 * the HW, so first we need to make sure the LSC request is valid.
3832b9b05d6fSMichal Krawczyk 	 */
3833b9b05d6fSMichal Krawczyk 	if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
3834b9b05d6fSMichal Krawczyk 		if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
3835b9b05d6fSMichal Krawczyk 			PMD_DRV_LOG(ERR,
3836b9b05d6fSMichal Krawczyk 				"LSC requested, but it's not supported by the AENQ\n");
3837b9b05d6fSMichal Krawczyk 			return -EINVAL;
3838b9b05d6fSMichal Krawczyk 		}
3839b9b05d6fSMichal Krawczyk 	} else {
3840b9b05d6fSMichal Krawczyk 		/* If LSC wasn't enabled by the app, let's enable all supported
3841b9b05d6fSMichal Krawczyk 		 * AENQ procedures except the LSC.
3842b9b05d6fSMichal Krawczyk 		 */
3843b9b05d6fSMichal Krawczyk 		aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
3844b9b05d6fSMichal Krawczyk 	}
3845b9b05d6fSMichal Krawczyk 
3846b9b05d6fSMichal Krawczyk 	rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
3847b9b05d6fSMichal Krawczyk 	if (rc != 0) {
3848b9b05d6fSMichal Krawczyk 		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
3849b9b05d6fSMichal Krawczyk 		return rc;
3850b9b05d6fSMichal Krawczyk 	}
3851b9b05d6fSMichal Krawczyk 
3852b9b05d6fSMichal Krawczyk 	adapter->active_aenq_groups = aenq_groups;
3853b9b05d6fSMichal Krawczyk 
3854b9b05d6fSMichal Krawczyk 	return 0;
3855b9b05d6fSMichal Krawczyk }
3856b9b05d6fSMichal Krawczyk 
3857e3595539SStanislaw Kardach int ena_mp_indirect_table_set(struct ena_adapter *adapter)
3858e3595539SStanislaw Kardach {
3859e3595539SStanislaw Kardach 	return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev);
3860e3595539SStanislaw Kardach }
3861e3595539SStanislaw Kardach 
3862e3595539SStanislaw Kardach int ena_mp_indirect_table_get(struct ena_adapter *adapter,
3863e3595539SStanislaw Kardach 			      uint32_t *indirect_table)
3864e3595539SStanislaw Kardach {
3865e3595539SStanislaw Kardach 	return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev,
3866e3595539SStanislaw Kardach 		indirect_table);
3867e3595539SStanislaw Kardach }
3868e3595539SStanislaw Kardach 
3869ca148440SMichal Krawczyk /*********************************************************************
3870850e1bb1SMichal Krawczyk  *  ena_plat_dpdk.h functions implementations
3871850e1bb1SMichal Krawczyk  *********************************************************************/
3872850e1bb1SMichal Krawczyk 
3873850e1bb1SMichal Krawczyk const struct rte_memzone *
3874850e1bb1SMichal Krawczyk ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
3875850e1bb1SMichal Krawczyk 		       int socket_id, unsigned int alignment, void **virt_addr,
3876850e1bb1SMichal Krawczyk 		       dma_addr_t *phys_addr)
3877850e1bb1SMichal Krawczyk {
3878850e1bb1SMichal Krawczyk 	char z_name[RTE_MEMZONE_NAMESIZE];
3879850e1bb1SMichal Krawczyk 	struct ena_adapter *adapter = data->dev_private;
3880850e1bb1SMichal Krawczyk 	const struct rte_memzone *memzone;
3881850e1bb1SMichal Krawczyk 	int rc;
3882850e1bb1SMichal Krawczyk 
3883850e1bb1SMichal Krawczyk 	rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "",
3884850e1bb1SMichal Krawczyk 		data->port_id, adapter->memzone_cnt);
3885850e1bb1SMichal Krawczyk 	if (rc >= RTE_MEMZONE_NAMESIZE) {
3886850e1bb1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
3887850e1bb1SMichal Krawczyk 			"Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n",
3888850e1bb1SMichal Krawczyk 			data->port_id, adapter->memzone_cnt);
3889850e1bb1SMichal Krawczyk 		goto error;
3890850e1bb1SMichal Krawczyk 	}
3891850e1bb1SMichal Krawczyk 	adapter->memzone_cnt++;
3892850e1bb1SMichal Krawczyk 
3893850e1bb1SMichal Krawczyk 	memzone = rte_memzone_reserve_aligned(z_name, size, socket_id,
3894850e1bb1SMichal Krawczyk 		RTE_MEMZONE_IOVA_CONTIG, alignment);
3895850e1bb1SMichal Krawczyk 	if (memzone == NULL) {
3896850e1bb1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n",
3897850e1bb1SMichal Krawczyk 			z_name);
3898850e1bb1SMichal Krawczyk 		goto error;
3899850e1bb1SMichal Krawczyk 	}
3900850e1bb1SMichal Krawczyk 
3901850e1bb1SMichal Krawczyk 	memset(memzone->addr, 0, size);
3902850e1bb1SMichal Krawczyk 	*virt_addr = memzone->addr;
3903850e1bb1SMichal Krawczyk 	*phys_addr = memzone->iova;
3904850e1bb1SMichal Krawczyk 
3905850e1bb1SMichal Krawczyk 	return memzone;
3906850e1bb1SMichal Krawczyk 
3907850e1bb1SMichal Krawczyk error:
3908850e1bb1SMichal Krawczyk 	*virt_addr = NULL;
3909850e1bb1SMichal Krawczyk 	*phys_addr = 0;
3910850e1bb1SMichal Krawczyk 
3911850e1bb1SMichal Krawczyk 	return NULL;
3912850e1bb1SMichal Krawczyk }
3913850e1bb1SMichal Krawczyk 
3914850e1bb1SMichal Krawczyk 
3915850e1bb1SMichal Krawczyk /*********************************************************************
3916ca148440SMichal Krawczyk  *  PMD configuration
3917ca148440SMichal Krawczyk  *********************************************************************/
3918fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3919fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
3920fdf91e0fSJan Blunck {
3921fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
3922fdf91e0fSJan Blunck 		sizeof(struct ena_adapter), eth_ena_dev_init);
3923fdf91e0fSJan Blunck }
3924fdf91e0fSJan Blunck 
3925fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
3926fdf91e0fSJan Blunck {
3927eb0ef49dSMichal Krawczyk 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
3928fdf91e0fSJan Blunck }
3929fdf91e0fSJan Blunck 
3930fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = {
39311173fca2SJan Medala 	.id_table = pci_id_ena_map,
393205e0eee0SRafal Kozik 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
393305e0eee0SRafal Kozik 		     RTE_PCI_DRV_WC_ACTIVATE,
3934fdf91e0fSJan Blunck 	.probe = eth_ena_pci_probe,
3935fdf91e0fSJan Blunck 	.remove = eth_ena_pci_remove,
39361173fca2SJan Medala };
39371173fca2SJan Medala 
3938fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
393901f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
394006e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
39419944919eSMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena,
39429944919eSMichal Krawczyk 	ENA_DEVARG_LARGE_LLQ_HDR "=<0|1> "
39439944919eSMichal Krawczyk 	ENA_DEVARG_ENABLE_LLQ "=<0|1> "
39449944919eSMichal Krawczyk 	ENA_DEVARG_MISS_TXC_TO "=<uint>");
3945eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
3946eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
39470a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
39480a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG);
39496f1c9df9SStephen Hemminger #endif
39500a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_TX
39510a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG);
39526f1c9df9SStephen Hemminger #endif
39530a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING);
39543adcba9aSMichal Krawczyk 
39553adcba9aSMichal Krawczyk /******************************************************************************
39563adcba9aSMichal Krawczyk  ******************************** AENQ Handlers *******************************
39573adcba9aSMichal Krawczyk  *****************************************************************************/
3958ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data,
3959ca148440SMichal Krawczyk 				      struct ena_admin_aenq_entry *aenq_e)
3960ca148440SMichal Krawczyk {
3961aab58857SStanislaw Kardach 	struct rte_eth_dev *eth_dev = adapter_data;
3962aab58857SStanislaw Kardach 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3963ca148440SMichal Krawczyk 	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
3964ca148440SMichal Krawczyk 	uint32_t status;
3965ca148440SMichal Krawczyk 
3966ca148440SMichal Krawczyk 	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3967ca148440SMichal Krawczyk 
3968ca148440SMichal Krawczyk 	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
3969ca148440SMichal Krawczyk 	adapter->link_status = status;
3970ca148440SMichal Krawczyk 
3971ca148440SMichal Krawczyk 	ena_link_update(eth_dev, 0);
39725723fbedSFerruh Yigit 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
3973ca148440SMichal Krawczyk }
3974ca148440SMichal Krawczyk 
3975aab58857SStanislaw Kardach static void ena_notification(void *adapter_data,
3976f01f060cSRafal Kozik 			     struct ena_admin_aenq_entry *aenq_e)
3977f01f060cSRafal Kozik {
3978aab58857SStanislaw Kardach 	struct rte_eth_dev *eth_dev = adapter_data;
3979aab58857SStanislaw Kardach 	struct ena_adapter *adapter = eth_dev->data->dev_private;
3980f01f060cSRafal Kozik 	struct ena_admin_ena_hw_hints *hints;
3981f01f060cSRafal Kozik 
3982f01f060cSRafal Kozik 	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
3983617898d1SMichal Krawczyk 		PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
3984f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.group,
3985f01f060cSRafal Kozik 			ENA_ADMIN_NOTIFICATION);
3986f01f060cSRafal Kozik 
3987b19f366cSMichal Krawczyk 	switch (aenq_e->aenq_common_desc.syndrome) {
3988f01f060cSRafal Kozik 	case ENA_ADMIN_UPDATE_HINTS:
3989f01f060cSRafal Kozik 		hints = (struct ena_admin_ena_hw_hints *)
3990f01f060cSRafal Kozik 			(&aenq_e->inline_data_w4);
3991f01f060cSRafal Kozik 		ena_update_hints(adapter, hints);
3992f01f060cSRafal Kozik 		break;
3993f01f060cSRafal Kozik 	default:
3994617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Invalid AENQ notification link state: %d\n",
3995b19f366cSMichal Krawczyk 			aenq_e->aenq_common_desc.syndrome);
3996f01f060cSRafal Kozik 	}
3997f01f060cSRafal Kozik }
3998f01f060cSRafal Kozik 
3999d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
4000d9b8b106SMichal Krawczyk 			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
4001d9b8b106SMichal Krawczyk {
4002aab58857SStanislaw Kardach 	struct rte_eth_dev *eth_dev = adapter_data;
4003aab58857SStanislaw Kardach 	struct ena_adapter *adapter = eth_dev->data->dev_private;
400494c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
400594c3e376SRafal Kozik 	uint64_t rx_drops;
4006e1e73e32SMichal Krawczyk 	uint64_t tx_drops;
400777d4ed30SShai Brandes 	uint64_t rx_overruns;
4008d9b8b106SMichal Krawczyk 
4009d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
401094c3e376SRafal Kozik 
401194c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
401294c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
4013e1e73e32SMichal Krawczyk 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
401477d4ed30SShai Brandes 	rx_overruns = ((uint64_t)desc->rx_overruns_high << 32) | desc->rx_overruns_low;
4015e1e73e32SMichal Krawczyk 
4016e1e73e32SMichal Krawczyk 	adapter->drv_stats->rx_drops = rx_drops;
4017e1e73e32SMichal Krawczyk 	adapter->dev_stats.tx_drops = tx_drops;
401877d4ed30SShai Brandes 	adapter->dev_stats.rx_overruns = rx_overruns;
4019d9b8b106SMichal Krawczyk }
4020d9b8b106SMichal Krawczyk 
40213adcba9aSMichal Krawczyk /**
40223adcba9aSMichal Krawczyk  * This handler will be called for any unknown event group or unimplemented handler
40233adcba9aSMichal Krawczyk  **/
40243adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
40253adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
40263adcba9aSMichal Krawczyk {
4027617898d1SMichal Krawczyk 	PMD_DRV_LOG(ERR,
4028617898d1SMichal Krawczyk 		"Unknown event was received or event with unimplemented handler\n");
40293adcba9aSMichal Krawczyk }
40303adcba9aSMichal Krawczyk 
4031ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
40323adcba9aSMichal Krawczyk 	.handlers = {
4033ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4034f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
4035d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
40363adcba9aSMichal Krawczyk 	},
40373adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
40383adcba9aSMichal Krawczyk };
4039e3595539SStanislaw Kardach 
4040e3595539SStanislaw Kardach /*********************************************************************
4041e3595539SStanislaw Kardach  *  Multi-Process communication request handling (in primary)
4042e3595539SStanislaw Kardach  *********************************************************************/
4043e3595539SStanislaw Kardach static int
4044e3595539SStanislaw Kardach ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
4045e3595539SStanislaw Kardach {
4046e3595539SStanislaw Kardach 	const struct ena_mp_body *req =
4047e3595539SStanislaw Kardach 		(const struct ena_mp_body *)mp_msg->param;
4048e3595539SStanislaw Kardach 	struct ena_adapter *adapter;
4049e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev;
4050e3595539SStanislaw Kardach 	struct ena_mp_body *rsp;
4051e3595539SStanislaw Kardach 	struct rte_mp_msg mp_rsp;
4052e3595539SStanislaw Kardach 	struct rte_eth_dev *dev;
4053e3595539SStanislaw Kardach 	int res = 0;
4054e3595539SStanislaw Kardach 
4055e3595539SStanislaw Kardach 	rsp = (struct ena_mp_body *)&mp_rsp.param;
4056e3595539SStanislaw Kardach 	mp_msg_init(&mp_rsp, req->type, req->port_id);
4057e3595539SStanislaw Kardach 
4058e3595539SStanislaw Kardach 	if (!rte_eth_dev_is_valid_port(req->port_id)) {
4059e3595539SStanislaw Kardach 		rte_errno = ENODEV;
4060e3595539SStanislaw Kardach 		res = -rte_errno;
4061e3595539SStanislaw Kardach 		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
4062e3595539SStanislaw Kardach 			    req->port_id, req->type);
4063e3595539SStanislaw Kardach 		goto end;
4064e3595539SStanislaw Kardach 	}
4065e3595539SStanislaw Kardach 	dev = &rte_eth_devices[req->port_id];
4066e3595539SStanislaw Kardach 	adapter = dev->data->dev_private;
4067e3595539SStanislaw Kardach 	ena_dev = &adapter->ena_dev;
4068e3595539SStanislaw Kardach 
4069e3595539SStanislaw Kardach 	switch (req->type) {
4070e3595539SStanislaw Kardach 	case ENA_MP_DEV_STATS_GET:
4071e3595539SStanislaw Kardach 		res = ena_com_get_dev_basic_stats(ena_dev,
4072e3595539SStanislaw Kardach 						  &adapter->basic_stats);
4073e3595539SStanislaw Kardach 		break;
4074e3595539SStanislaw Kardach 	case ENA_MP_ENI_STATS_GET:
4075e3595539SStanislaw Kardach 		res = ena_com_get_eni_stats(ena_dev,
407692401abfSShai Brandes 			(struct ena_admin_eni_stats *)&adapter->metrics_stats);
4077e3595539SStanislaw Kardach 		break;
4078e3595539SStanislaw Kardach 	case ENA_MP_MTU_SET:
4079e3595539SStanislaw Kardach 		res = ena_com_set_dev_mtu(ena_dev, req->args.mtu);
4080e3595539SStanislaw Kardach 		break;
4081e3595539SStanislaw Kardach 	case ENA_MP_IND_TBL_GET:
4082e3595539SStanislaw Kardach 		res = ena_com_indirect_table_get(ena_dev,
4083e3595539SStanislaw Kardach 						 adapter->indirect_table);
4084e3595539SStanislaw Kardach 		break;
4085e3595539SStanislaw Kardach 	case ENA_MP_IND_TBL_SET:
4086e3595539SStanislaw Kardach 		res = ena_com_indirect_table_set(ena_dev);
4087e3595539SStanislaw Kardach 		break;
408892401abfSShai Brandes 	case ENA_MP_CUSTOMER_METRICS_GET:
408992401abfSShai Brandes 		res = ena_com_get_customer_metrics(ena_dev,
409092401abfSShai Brandes 				(char *)adapter->metrics_stats,
409192401abfSShai Brandes 				sizeof(uint64_t) * adapter->metrics_num);
409292401abfSShai Brandes 		break;
4093a73dd098SShai Brandes 	case ENA_MP_SRD_STATS_GET:
4094a73dd098SShai Brandes 		res = ena_com_get_ena_srd_info(ena_dev,
4095a73dd098SShai Brandes 				(struct ena_admin_ena_srd_info *)&adapter->srd_stats);
4096a73dd098SShai Brandes 		break;
4097e3595539SStanislaw Kardach 	default:
4098e3595539SStanislaw Kardach 		PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
4099e3595539SStanislaw Kardach 		res = -EINVAL;
4100e3595539SStanislaw Kardach 		break;
4101e3595539SStanislaw Kardach 	}
4102e3595539SStanislaw Kardach 
4103e3595539SStanislaw Kardach end:
4104e3595539SStanislaw Kardach 	/* Save processing result in the reply */
4105e3595539SStanislaw Kardach 	rsp->result = res;
4106e3595539SStanislaw Kardach 	/* Return only the IPC delivery status; the operation result is in rsp->result */
4107e3595539SStanislaw Kardach 	return rte_mp_reply(&mp_rsp, peer);
4108e3595539SStanislaw Kardach }
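
/*
 * Illustrative sketch of the secondary-process side of this IPC (the
 * driver's real sender lives in the ENA_PROXY helpers earlier in this file;
 * the function below is hypothetical): build a request with mp_msg_init(),
 * send it synchronously with rte_mp_request_sync(), and unpack the
 * primary's result code from the reply body. Per the rte_mp API contract,
 * the reply message array is malloc()'d and must be freed by the caller.
 */
static int
ena_mp_request_sketch(enum ena_mp_req type, uint16_t port_id)
{
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	struct rte_mp_msg mp_req;
	struct rte_mp_reply mp_rep;
	struct ena_mp_body *rsp;
	int ret;

	mp_msg_init(&mp_req, type, port_id);

	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
	if (ret != 0)
		return -rte_errno;
	if (mp_rep.nb_received != 1) {
		free(mp_rep.msgs);
		return -EINVAL;
	}

	rsp = (struct ena_mp_body *)mp_rep.msgs[0].param;
	ret = rsp->result;
	free(mp_rep.msgs);

	return ret;
}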
4109