/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	8
#define DRV_MODULE_VER_SUBMINOR	0

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)

#define GET_L4_HDR_LEN(mbuf) \
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
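/*
 * Illustration (comment only, not used by the build): __MERGE_64B_H_L()
 * recombines 64-bit counters that the device reports as two 32-bit halves,
 * e.g. __MERGE_64B_H_L(0x1, 0x2) == 0x100000002. GET_L4_HDR_LEN() reads the
 * TCP data-offset field, which keeps the header length in 32-bit words in
 * its upper 4 bits, so a minimal 20-byte TCP header yields 5.
 */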
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

/*
 * We should try to keep ENA_CLEANUP_BUF_SIZE lower than
 * RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in the mempool local cache.
 */
#define ENA_CLEANUP_BUF_SIZE	256

#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_METRICS_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, metrics)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_STAT_ENA_SRD_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, srd)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
/* Timeout in seconds after which a single uncompleted Tx packet should be
 * considered as missing.
 */
#define ENA_DEVARG_MISS_TXC_TO "miss_txc_to"
/*
 * Controls whether LLQ should be used (if available). Enabled by default.
 * NOTE: Disabling the LLQ is highly discouraged, as it may lead to a huge
 * performance degradation on 6th generation AWS instances.
 */
#define ENA_DEVARG_ENABLE_LLQ "enable_llq"
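/*
 * Usage sketch (illustrative; the PCI address is hypothetical): the device
 * arguments above are passed on the EAL command line, e.g.
 *	dpdk-testpmd -a 00:06.0,large_llq_hdr=1,miss_txc_to=5
 */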
/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append it to the name.
 */
rte_atomic64_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

/*
 * The legacy metrics (also known as ENI stats) consisted of 5 stats, while
 * the reworked metrics (also known as customer metrics) support an
 * additional stat.
 */
static struct ena_stats ena_stats_metrics_strings[] = {
	ENA_STAT_METRICS_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(pps_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(linklocal_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(conntrack_allowance_available),
};

static const struct ena_stats ena_stats_srd_strings[] = {
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_mode),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
	ENA_STAT_TX_ENTRY(missed_tx),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(l3_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_good),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_METRICS	ARRAY_SIZE(ena_stats_metrics_strings)
#define ENA_STATS_ARRAY_METRICS_LEGACY	(ENA_STATS_ARRAY_METRICS - 1)
#define ENA_STATS_ARRAY_ENA_SRD	ARRAY_SIZE(ena_stats_srd_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
	RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
	RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
	RTE_MBUF_F_TX_IP_CKSUM |\
	RTE_MBUF_F_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \
	RTE_MBUF_F_TX_IPV6 | \
	RTE_MBUF_F_TX_IPV4 | \
	RTE_MBUF_F_TX_IP_CKSUM | \
	RTE_MBUF_F_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

/** HW specific offloads capabilities. */
/* IPv4 checksum offload. */
#define ENA_L3_IPV4_CSUM		0x0001
/* TCP/UDP checksum offload for IPv4 packets. */
#define ENA_L4_IPV4_CSUM		0x0002
/* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
#define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
/* TCP/UDP checksum offload for IPv6 packets. */
#define ENA_L4_IPV6_CSUM		0x0008
/* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
#define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
/* TSO support for IPv4 packets. */
#define ENA_IPV4_TSO			0x0020

/* Device supports setting RSS hash. */
#define ENA_RX_RSS_HASH			0x0040
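/*
 * Illustration (comment only): a device reporting IPv4 checksum and TSO
 * support would expose a capability mask of
 *	ENA_L3_IPV4_CSUM | ENA_L4_IPV4_CSUM | ENA_IPV4_TSO == 0x0023,
 * which ena_get_tx_port_offloads() translates into the matching
 * RTE_ETH_TX_OFFLOAD_* bits.
 */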
static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_adapter *adapter,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
				      const uint64_t *ids,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned int size);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static void ena_copy_customer_metrics(struct ena_adapter *adapter,
				      uint64_t *buf,
				      size_t buf_size);
static void ena_copy_ena_srd_info(struct ena_adapter *adapter,
				  struct ena_stats_srd *srd_info);
static int ena_setup_rx_intr(struct rte_eth_dev *dev);
static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ena_configure_aenq(struct ena_adapter *adapter);
static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
				 const void *peer);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure = ena_dev_configure,
	.dev_infos_get = ena_infos_get,
	.rx_queue_setup = ena_rx_queue_setup,
	.tx_queue_setup = ena_tx_queue_setup,
	.dev_start = ena_start,
	.dev_stop = ena_stop,
	.link_update = ena_link_update,
	.stats_get = ena_stats_get,
	.xstats_get_names = ena_xstats_get_names,
	.xstats_get_names_by_id = ena_xstats_get_names_by_id,
	.xstats_get = ena_xstats_get,
	.xstats_get_by_id = ena_xstats_get_by_id,
	.mtu_set = ena_mtu_set,
	.rx_queue_release = ena_rx_queue_release,
	.tx_queue_release = ena_tx_queue_release,
	.dev_close = ena_close,
	.dev_reset = ena_dev_reset,
	.reta_update = ena_rss_reta_update,
	.reta_query = ena_rss_reta_query,
	.rx_queue_intr_enable = ena_rx_queue_intr_enable,
	.rx_queue_intr_disable = ena_rx_queue_intr_disable,
	.rss_hash_update = ena_rss_hash_update,
	.rss_hash_conf_get = ena_rss_hash_conf_get,
	.tx_done_cleanup = ena_tx_cleanup,
};

/*********************************************************************
 *  Multi-Process communication bits
 *********************************************************************/
/* rte_mp IPC message name */
#define ENA_MP_NAME	"net_ena_mp"
/* Request timeout in seconds */
#define ENA_MP_REQ_TMO	5

/** Proxy request type */
enum ena_mp_req {
	ENA_MP_DEV_STATS_GET,
	ENA_MP_ENI_STATS_GET,
	ENA_MP_MTU_SET,
	ENA_MP_IND_TBL_GET,
	ENA_MP_IND_TBL_SET,
	ENA_MP_CUSTOMER_METRICS_GET,
	ENA_MP_SRD_STATS_GET,
};

/** Proxy message body. Shared between requests and responses. */
struct ena_mp_body {
	/* Message type */
	enum ena_mp_req type;
	int port_id;
	/* Processing result. Set in replies. 0 if message succeeded, negative
	 * error code otherwise.
	 */
	int result;
	union {
		int mtu; /* For ENA_MP_MTU_SET */
	} args;
};

/**
 * Initialize IPC message.
 *
 * @param[out] msg
 *   Pointer to the message to initialize.
 * @param[in] type
 *   Message type.
 * @param[in] port_id
 *   Port ID of target device.
 */
static void
mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id)
{
	struct ena_mp_body *body = (struct ena_mp_body *)&msg->param;

	memset(msg, 0, sizeof(*msg));
	strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name));
	msg->len_param = sizeof(*body);
	body->type = type;
	body->port_id = port_id;
}

/*********************************************************************
 *  Multi-Process communication PMD API
 *********************************************************************/
/**
 * Define proxy request descriptor
 *
 * Used to define all structures and functions required for proxying a given
 * function to the primary process, including the code run to prepare the
 * request and process the response.
 *
 * @param[in] f
 *   Name of the function to proxy
 * @param[in] t
 *   Message type to use
 * @param[in] prep
 *   Body of a function to prepare the request in form of a statement
 *   expression. It is passed all the original function arguments along with
 *   two extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *req - body of a request to prepare.
 * @param[in] proc
 *   Body of a function to process the response in form of a statement
 *   expression.
 *   It is passed all the original function arguments along with two
 *   extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *rsp - body of a response to process.
 * @param ...
 *   Proxied function's arguments
 *
 * @note Inside prep and proc any parameters which aren't used should be marked
 *       as such (with ENA_TOUCH or __rte_unused).
 */
#define ENA_PROXY_DESC(f, t, prep, proc, ...)			\
	static const enum ena_mp_req mp_type_ ## f = t;		\
	static const char *mp_name_ ## f = #t;			\
	static void mp_prep_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *req,	\
				  __VA_ARGS__)			\
	{							\
		prep;						\
	}							\
	static void mp_proc_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *rsp,	\
				  __VA_ARGS__)			\
	{							\
		proc;						\
	}

/**
 * Proxy wrapper for calling primary functions in a secondary process.
 *
 * Depending on whether called in primary or secondary process, calls the
 * @p func directly or proxies the call to the primary process via rte_mp IPC.
 * This macro requires a proxy request descriptor to be defined for @p func
 * using the ENA_PROXY_DESC() macro.
 *
 * @param[in/out] a
 *   Device PMD data. Used for sending the message and sharing message results
 *   between primary and secondary.
 * @param[in] f
 *   Function to proxy.
 * @param ...
 *   Arguments of @p func.
 *
 * @return
 *   - 0: Processing succeeded and response handler was called.
 *   - -EPERM: IPC is unavailable on this platform. This means only primary
 *             process may call the proxied function.
 *   - -EIO:   IPC returned error on request send. Inspect rte_errno for the
 *             detailed error code.
 *   - Negative error code from the proxied function.
 *
 * @note This mechanism is geared towards control-path tasks. Avoid calling it
 *       in the fast path unless unbounded delays are allowed. This is due to
 *       the IPC mechanism itself (socket based).
 * @note Due to IPC parameter size limitations the proxy logic shares call
 *       results through the struct ena_adapter shared memory.
 *       This makes the proxy mechanism strictly single-threaded. Therefore
 *       be sure to make all calls to the same proxied function under the
 *       same lock.
 */
#define ENA_PROXY(a, f, ...)						\
__extension__ ({							\
	struct ena_adapter *_a = (a);					\
	struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO };		\
	struct ena_mp_body *req, *rsp;					\
	struct rte_mp_reply mp_rep;					\
	struct rte_mp_msg mp_req;					\
	int ret;							\
									\
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {		\
		ret = f(__VA_ARGS__);					\
	} else {							\
		/* Prepare and send request */				\
		req = (struct ena_mp_body *)&mp_req.param;		\
		mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \
		mp_prep_ ## f(_a, req, ## __VA_ARGS__);			\
									\
		ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);	\
		if (likely(!ret)) {					\
			RTE_ASSERT(mp_rep.nb_received == 1);		\
			rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \
			ret = rsp->result;				\
			if (ret == 0) {					\
				mp_proc_##f(_a, rsp, ## __VA_ARGS__);	\
			} else {					\
				PMD_DRV_LOG(ERR,			\
					    "%s returned error: %d\n",	\
					    mp_name_ ## f, rsp->result);\
			}						\
			free(mp_rep.msgs);				\
		} else if (rte_errno == ENOTSUP) {			\
			PMD_DRV_LOG(ERR,				\
				    "No IPC, can't proxy to primary\n");\
			ret = -rte_errno;				\
		} else {						\
			PMD_DRV_LOG(ERR, "Request %s failed: %s\n",	\
				    mp_name_ ## f,			\
				    rte_strerror(rte_errno));		\
			ret = -EIO;					\
		}							\
	}								\
	ret;								\
})
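/*
 * Usage sketch (mirrors ena_stats_get() later in this file):
 *
 *	rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev,
 *		       &ena_stats);
 *
 * In the primary process this collapses into a direct call to
 * ena_com_get_dev_basic_stats(); in a secondary process the matching
 * ENA_PROXY_DESC() below turns it into an rte_mp round-trip.
 */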
/*********************************************************************
 *  Multi-Process communication request descriptors
 *********************************************************************/

ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != &adapter->basic_stats)
		rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats);

ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != (struct ena_admin_eni_stats *)adapter->metrics_stats)
		rte_memcpy(stats, adapter->metrics_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats);

ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(ena_dev);
	req->args.mtu = mtu;
}),
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(mtu);
}),
	struct ena_com_dev *ena_dev, int mtu);

ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
}),
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
}),
	struct ena_com_dev *ena_dev);

ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(ind_tbl);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (ind_tbl != adapter->indirect_table)
		rte_memcpy(ind_tbl, adapter->indirect_table,
			   sizeof(adapter->indirect_table));
}),
	struct ena_com_dev *ena_dev, u32 *ind_tbl);

ENA_PROXY_DESC(ena_com_get_customer_metrics, ENA_MP_CUSTOMER_METRICS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(buf);
	ENA_TOUCH(buf_size);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (buf != (char *)adapter->metrics_stats)
		rte_memcpy(buf, adapter->metrics_stats, buf_size);
}),
	struct ena_com_dev *ena_dev, char *buf, size_t buf_size);

ENA_PROXY_DESC(ena_com_get_ena_srd_info, ENA_MP_SRD_STATS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(info);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if ((struct ena_stats_srd *)info != &adapter->srd_stats)
		rte_memcpy((struct ena_stats_srd *)info,
			   &adapter->srd_stats,
			   sizeof(struct ena_stats_srd));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_ena_srd_info *info);

static inline void ena_trigger_reset(struct ena_adapter *adapter,
				     enum ena_regs_reset_reason_types reason)
{
	if (likely(!adapter->trigger_reset)) {
		adapter->reset_reason = reason;
		adapter->trigger_reset = true;
	}
}
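/*
 * Illustration (comment only): paths that detect a fatal condition request
 * a reset with a reason code, e.g. validate_tx_req_id() below calls
 *	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
 * Only the first reported reason is kept; later ones are ignored until the
 * pending reset is serviced.
 */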
static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
				       struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx,
				       bool fill_hash)
{
	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err)) {
			++rx_stats->l3_csum_bad;
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		} else {
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		}
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
	} else {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			++rx_stats->l4_csum_bad;
			/*
			 * For the L4 Rx checksum offload the HW may indicate
			 * bad checksum although it's valid. Because of that,
			 * we're setting the UNKNOWN flag to let the app
			 * re-verify the checksum.
			 */
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
		} else {
			++rx_stats->l4_csum_good;
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		}
	}

	if (fill_hash &&
	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mbuf->hash.rss = ena_rx_ctx->hash;
	}

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			/* For the IPv6 packets, DF always needs to be true. */
			ena_tx_ctx->df = 1;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = 1;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
				RTE_MBUF_F_TX_UDP_CKSUM) &&
				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}
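/*
 * Illustration (comment only): an application requesting TSO on an IPv4/TCP
 * mbuf would set the fields consumed above roughly as follows:
 *
 *	mbuf->l2_len = sizeof(struct rte_ether_hdr);
 *	mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
 *	mbuf->tso_segsz = 1448;
 *	mbuf->ol_flags = RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *			 RTE_MBUF_F_TX_TCP_SEG;
 *
 * ena_tx_mbuf_prepare() then enables TSO plus the L3/L4 checksum offloads
 * and fills the meta descriptor from l2_len/l3_len/tso_segsz.
 */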
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf. queue %d:%d req_id %u\n",
			tx_ring->port_id, tx_ring->id, req_id);
	else
		PMD_TX_LOG(ERR, "Invalid req_id: %hu in queue %d:%d\n",
			req_id, tx_ring->port_id, tx_ring->id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
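/*
 * Worked example (assuming the canonical ENA_ADMIN_HOST_INFO_*_SHIFT values
 * of 8 and 16 from ena_admin_defs.h): the 2.8.0 version defined above packs
 * into driver_version as 2 | (8 << 8) | (0 << 16) == 0x0802.
 */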
/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
{
	struct ena_adapter *adapter = data->dev_private;

	return ENA_STATS_ARRAY_GLOBAL +
		adapter->metrics_num +
		ENA_STATS_ARRAY_ENA_SRD +
		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->edev_data);

	/* Allocate 32 bytes for each string and 64 bits for each value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}
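/*
 * Worked sizing example for ena_config_debug_area() above (illustrative):
 * each xstat costs ETH_GSTRING_LEN (32) bytes for its name plus 8 bytes for
 * its u64 value, i.e. 40 bytes in total. A single-queue port exposing the 6
 * customer metrics would have ss_count = 4 + 6 + 5 + 8 + 9 = 32 xstats and
 * a debug area of 32 * 40 = 1280 bytes.
 */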
static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     dev);

	/*
	 * MAC is not allocated dynamically. Setting NULL should prevent the
	 * release of the resource in the rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	/* Cannot release memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
		return -EPERM;
	}

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(dev, i);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(dev, i);
}

static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->rx_queues[qid];

	/* Free ring resources */
	rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->tx_queues[qid];

	/* Free ring resources */
	rte_free(ring->push_buf_intermediate_buf);

	rte_free(ring->tx_buffer_info);

	rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;
	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];

		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf) {
			rte_pktmbuf_free(tx_buf->mbuf);
			tx_buf->mbuf = NULL;
		}
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;
	link->link_status = adapter->link_status ?
		RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of Rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of Tx queues\n");
			}

			rc = ena_queue_start(dev, &queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					"Failed to start queue[%d] of type(%d)\n",
					i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
10732fca2a98SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV) { 10745920d930SMichal Krawczyk max_tx_queue_size = RTE_MIN(max_tx_queue_size, 10752fca2a98SMichal Krawczyk llq->max_llq_depth); 10762fca2a98SMichal Krawczyk } else { 10775920d930SMichal Krawczyk max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1078ea93d37eSRafal Kozik max_queue_ext->max_tx_sq_depth); 10792fca2a98SMichal Krawczyk } 10802fca2a98SMichal Krawczyk 1081ea93d37eSRafal Kozik ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1082ea93d37eSRafal Kozik max_queue_ext->max_per_packet_rx_descs); 1083ea93d37eSRafal Kozik ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1084ea93d37eSRafal Kozik max_queue_ext->max_per_packet_tx_descs); 1085ea93d37eSRafal Kozik } else { 1086ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 1087ea93d37eSRafal Kozik &ctx->get_feat_ctx->max_queues; 10885920d930SMichal Krawczyk max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, 1089ea93d37eSRafal Kozik max_queues->max_sq_depth); 10905920d930SMichal Krawczyk max_tx_queue_size = max_queues->max_cq_depth; 10912fca2a98SMichal Krawczyk 10922fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == 10932fca2a98SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV) { 10945920d930SMichal Krawczyk max_tx_queue_size = RTE_MIN(max_tx_queue_size, 10952fca2a98SMichal Krawczyk llq->max_llq_depth); 10962fca2a98SMichal Krawczyk } else { 10975920d930SMichal Krawczyk max_tx_queue_size = RTE_MIN(max_tx_queue_size, 10982fca2a98SMichal Krawczyk max_queues->max_sq_depth); 10992fca2a98SMichal Krawczyk } 11002fca2a98SMichal Krawczyk 1101ea93d37eSRafal Kozik ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1102ea93d37eSRafal Kozik max_queues->max_packet_rx_descs); 11035920d930SMichal Krawczyk ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 11045920d930SMichal Krawczyk max_queues->max_packet_tx_descs); 1105ea93d37eSRafal Kozik } 11061173fca2SJan Medala 1107ea93d37eSRafal Kozik /* Round down to the nearest power of 2 */ 11085920d930SMichal Krawczyk max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); 11095920d930SMichal Krawczyk max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size); 11101173fca2SJan Medala 11118a7a73f2SMichal Krawczyk if (use_large_llq_hdr) { 11128a7a73f2SMichal Krawczyk if ((llq->entry_size_ctrl_supported & 11138a7a73f2SMichal Krawczyk ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 11148a7a73f2SMichal Krawczyk (ena_dev->tx_mem_queue_type == 11158a7a73f2SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV)) { 11168a7a73f2SMichal Krawczyk max_tx_queue_size /= 2; 11178a7a73f2SMichal Krawczyk PMD_INIT_LOG(INFO, 1118617898d1SMichal Krawczyk "Forcing large headers and decreasing maximum Tx queue size to %d\n", 11198a7a73f2SMichal Krawczyk max_tx_queue_size); 11208a7a73f2SMichal Krawczyk } else { 11218a7a73f2SMichal Krawczyk PMD_INIT_LOG(ERR, 11228a7a73f2SMichal Krawczyk "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 11238a7a73f2SMichal Krawczyk } 11248a7a73f2SMichal Krawczyk } 11258a7a73f2SMichal Krawczyk 11265920d930SMichal Krawczyk if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 1127617898d1SMichal Krawczyk PMD_INIT_LOG(ERR, "Invalid queue size\n"); 11281173fca2SJan Medala return -EFAULT; 11291173fca2SJan Medala } 11301173fca2SJan Medala 11315920d930SMichal Krawczyk ctx->max_tx_queue_size = max_tx_queue_size; 11325920d930SMichal Krawczyk ctx->max_rx_queue_size = max_rx_queue_size; 11332061fe41SRafal Kozik 1134ea93d37eSRafal Kozik return 0; 11351173fca2SJan Medala } 11361173fca2SJan Medala 
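/*
 * Editor's illustration of the sizing logic above, using hypothetical
 * device limits (not taken from any real ENA device). Assume
 * max_tx_cq_depth = 1500, llq->max_llq_depth = 1024, the LLQ placement
 * policy (ENA_ADMIN_PLACEMENT_POLICY_DEV) and use_large_llq_hdr with
 * 256B entry support:
 *
 *   max_tx_queue_size = RTE_MIN(1500, 1024);       // -> 1024 (LLQ depth)
 *   max_tx_queue_size = rte_align32prevpow2(1024); // -> 1024, already a pow2
 *   max_tx_queue_size /= 2;                        // -> 512 (large headers)
 *
 * The resulting ctx->max_tx_queue_size of 512 is what nb_desc is later
 * checked against in ena_tx_queue_setup().
 */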
11371173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev) 11381173fca2SJan Medala { 1139890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 11401173fca2SJan Medala 11411173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->ierrors); 11421173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->oerrors); 11431173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 1144e1e73e32SMichal Krawczyk adapter->drv_stats->rx_drops = 0; 11451173fca2SJan Medala } 11461173fca2SJan Medala 1147d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev, 11481173fca2SJan Medala struct rte_eth_stats *stats) 11491173fca2SJan Medala { 11501173fca2SJan Medala struct ena_admin_basic_stats ena_stats; 1151890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 11521173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 11531173fca2SJan Medala int rc; 115445b6d861SMichal Krawczyk int i; 115545b6d861SMichal Krawczyk int max_rings_stats; 11561173fca2SJan Medala 11571173fca2SJan Medala memset(&ena_stats, 0, sizeof(ena_stats)); 11581343c415SMichal Krawczyk 11591343c415SMichal Krawczyk rte_spinlock_lock(&adapter->admin_lock); 1160e3595539SStanislaw Kardach rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev, 1161e3595539SStanislaw Kardach &ena_stats); 11621343c415SMichal Krawczyk rte_spinlock_unlock(&adapter->admin_lock); 11631173fca2SJan Medala if (unlikely(rc)) { 11646f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 1165d5b0924bSMatan Azrad return rc; 11661173fca2SJan Medala } 11671173fca2SJan Medala 11681173fca2SJan Medala /* Set of basic statistics from ENA */ 11691173fca2SJan Medala stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 11701173fca2SJan Medala ena_stats.rx_pkts_low); 11711173fca2SJan Medala stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 11721173fca2SJan Medala ena_stats.tx_pkts_low); 11731173fca2SJan Medala stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 11741173fca2SJan Medala ena_stats.rx_bytes_low); 11751173fca2SJan Medala stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 11761173fca2SJan Medala ena_stats.tx_bytes_low); 11771173fca2SJan Medala 11781173fca2SJan Medala /* Driver related stats */ 1179e1e73e32SMichal Krawczyk stats->imissed = adapter->drv_stats->rx_drops; 11801173fca2SJan Medala stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 11811173fca2SJan Medala stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 11821173fca2SJan Medala stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 118345b6d861SMichal Krawczyk 118445b6d861SMichal Krawczyk max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 118545b6d861SMichal Krawczyk RTE_ETHDEV_QUEUE_STAT_CNTRS); 118645b6d861SMichal Krawczyk for (i = 0; i < max_rings_stats; ++i) { 118745b6d861SMichal Krawczyk struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 118845b6d861SMichal Krawczyk 118945b6d861SMichal Krawczyk stats->q_ibytes[i] = rx_stats->bytes; 119045b6d861SMichal Krawczyk stats->q_ipackets[i] = rx_stats->cnt; 119145b6d861SMichal Krawczyk stats->q_errors[i] = rx_stats->bad_desc_num + 119245b6d861SMichal Krawczyk rx_stats->bad_req_id; 119345b6d861SMichal Krawczyk } 119445b6d861SMichal Krawczyk 119545b6d861SMichal Krawczyk max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 119645b6d861SMichal Krawczyk RTE_ETHDEV_QUEUE_STAT_CNTRS); 119745b6d861SMichal Krawczyk for (i = 0; i < max_rings_stats; ++i) 
{ 119845b6d861SMichal Krawczyk struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 119945b6d861SMichal Krawczyk 120045b6d861SMichal Krawczyk stats->q_obytes[i] = tx_stats->bytes; 120145b6d861SMichal Krawczyk stats->q_opackets[i] = tx_stats->cnt; 120245b6d861SMichal Krawczyk } 120345b6d861SMichal Krawczyk 1204d5b0924bSMatan Azrad return 0; 12051173fca2SJan Medala } 12061173fca2SJan Medala 12071173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 12081173fca2SJan Medala { 12091173fca2SJan Medala struct ena_adapter *adapter; 12101173fca2SJan Medala struct ena_com_dev *ena_dev; 12111173fca2SJan Medala int rc = 0; 12121173fca2SJan Medala 1213498c687aSRafal Kozik ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1214498c687aSRafal Kozik ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1215890728ffSStephen Hemminger adapter = dev->data->dev_private; 12161173fca2SJan Medala 12171173fca2SJan Medala ena_dev = &adapter->ena_dev; 1218498c687aSRafal Kozik ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 12191173fca2SJan Medala 1220e3595539SStanislaw Kardach rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu); 12211173fca2SJan Medala if (rc) 12226f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 12231173fca2SJan Medala else 1224617898d1SMichal Krawczyk PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu); 12251173fca2SJan Medala 12261173fca2SJan Medala return rc; 12271173fca2SJan Medala } 12281173fca2SJan Medala 12291173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev) 12301173fca2SJan Medala { 1231890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 1232d9b8b106SMichal Krawczyk uint64_t ticks; 12331173fca2SJan Medala int rc = 0; 12349210f0caSJie Hai uint16_t i; 12351173fca2SJan Medala 123639ecdd3dSStanislaw Kardach /* Cannot allocate memory in secondary process */ 123739ecdd3dSStanislaw Kardach if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 123839ecdd3dSStanislaw Kardach PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n"); 123939ecdd3dSStanislaw Kardach return -EPERM; 124039ecdd3dSStanislaw Kardach } 124139ecdd3dSStanislaw Kardach 12426986cdc4SMichal Krawczyk rc = ena_setup_rx_intr(dev); 12436986cdc4SMichal Krawczyk if (rc) 12446986cdc4SMichal Krawczyk return rc; 12456986cdc4SMichal Krawczyk 124626e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 12471173fca2SJan Medala if (rc) 12481173fca2SJan Medala return rc; 12491173fca2SJan Medala 125026e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 12511173fca2SJan Medala if (rc) 125226e5543dSRafal Kozik goto err_start_tx; 12531173fca2SJan Medala 1254295968d1SFerruh Yigit if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 125534d5e97eSMichal Krawczyk rc = ena_rss_configure(adapter); 12561173fca2SJan Medala if (rc) 125726e5543dSRafal Kozik goto err_rss_init; 12581173fca2SJan Medala } 12591173fca2SJan Medala 12601173fca2SJan Medala ena_stats_restart(dev); 12611173fca2SJan Medala 1262d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles(); 1263d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1264d9b8b106SMichal Krawczyk 1265d9b8b106SMichal Krawczyk ticks = rte_get_timer_hz(); 1266d9b8b106SMichal Krawczyk rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1267aab58857SStanislaw Kardach ena_timer_wd_callback, dev); 1268d9b8b106SMichal Krawczyk 12697830e905SSolganik Alexander 
++adapter->dev_stats.dev_start; 12701173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_RUNNING; 12711173fca2SJan Medala 12729210f0caSJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 12739210f0caSJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 12749210f0caSJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 12759210f0caSJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 12769210f0caSJie Hai 12771173fca2SJan Medala return 0; 127826e5543dSRafal Kozik 127926e5543dSRafal Kozik err_rss_init: 128026e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 128126e5543dSRafal Kozik err_start_tx: 128226e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 128326e5543dSRafal Kozik return rc; 12841173fca2SJan Medala } 12851173fca2SJan Medala 128662024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev) 1287eb0ef49dSMichal Krawczyk { 1288890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 1289e457bc70SRafal Kozik struct ena_com_dev *ena_dev = &adapter->ena_dev; 12906986cdc4SMichal Krawczyk struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1291d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 12929210f0caSJie Hai uint16_t i; 1293e457bc70SRafal Kozik int rc; 1294eb0ef49dSMichal Krawczyk 129539ecdd3dSStanislaw Kardach /* Cannot free memory in secondary process */ 129639ecdd3dSStanislaw Kardach if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 129739ecdd3dSStanislaw Kardach PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n"); 129839ecdd3dSStanislaw Kardach return -EPERM; 129939ecdd3dSStanislaw Kardach } 130039ecdd3dSStanislaw Kardach 1301d9b8b106SMichal Krawczyk rte_timer_stop_sync(&adapter->timer_wd); 130226e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 130326e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1304d9b8b106SMichal Krawczyk 1305e457bc70SRafal Kozik if (adapter->trigger_reset) { 1306e457bc70SRafal Kozik rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1307e457bc70SRafal Kozik if (rc) 1308617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc); 1309e457bc70SRafal Kozik } 1310e457bc70SRafal Kozik 13116986cdc4SMichal Krawczyk rte_intr_disable(intr_handle); 13126986cdc4SMichal Krawczyk 13136986cdc4SMichal Krawczyk rte_intr_efd_disable(intr_handle); 1314d61138d4SHarman Kalra 1315d61138d4SHarman Kalra /* Cleanup vector list */ 1316d61138d4SHarman Kalra rte_intr_vec_list_free(intr_handle); 13176986cdc4SMichal Krawczyk 13186986cdc4SMichal Krawczyk rte_intr_enable(intr_handle); 13196986cdc4SMichal Krawczyk 13207830e905SSolganik Alexander ++adapter->dev_stats.dev_stop; 1321eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_STOPPED; 1322b8f5d2aeSThomas Monjalon dev->data->dev_started = 0; 132362024eb8SIvan Ilchenko 13249210f0caSJie Hai for (i = 0; i < dev->data->nb_rx_queues; i++) 13259210f0caSJie Hai dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 13269210f0caSJie Hai for (i = 0; i < dev->data->nb_tx_queues; i++) 13279210f0caSJie Hai dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 13289210f0caSJie Hai 132962024eb8SIvan Ilchenko return 0; 1330eb0ef49dSMichal Krawczyk } 1331eb0ef49dSMichal Krawczyk 13326986cdc4SMichal Krawczyk static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) 1333df238f84SMichal Krawczyk { 13346986cdc4SMichal Krawczyk struct ena_adapter *adapter = ring->adapter; 13356986cdc4SMichal Krawczyk struct ena_com_dev *ena_dev = 
&adapter->ena_dev; 13366986cdc4SMichal Krawczyk struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1337d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1338df238f84SMichal Krawczyk struct ena_com_create_io_ctx ctx = 1339df238f84SMichal Krawczyk /* policy set to _HOST just to satisfy icc compiler */ 1340df238f84SMichal Krawczyk { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1341df238f84SMichal Krawczyk 0, 0, 0, 0, 0 }; 1342df238f84SMichal Krawczyk uint16_t ena_qid; 1343778677dcSRafal Kozik unsigned int i; 1344df238f84SMichal Krawczyk int rc; 1345df238f84SMichal Krawczyk 13466986cdc4SMichal Krawczyk ctx.msix_vector = -1; 1347df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) { 1348df238f84SMichal Krawczyk ena_qid = ENA_IO_TXQ_IDX(ring->id); 1349df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1350df238f84SMichal Krawczyk ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1351778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1352778677dcSRafal Kozik ring->empty_tx_reqs[i] = i; 1353df238f84SMichal Krawczyk } else { 1354df238f84SMichal Krawczyk ena_qid = ENA_IO_RXQ_IDX(ring->id); 1355df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 13566986cdc4SMichal Krawczyk if (rte_intr_dp_is_en(intr_handle)) 1357d61138d4SHarman Kalra ctx.msix_vector = 1358d61138d4SHarman Kalra rte_intr_vec_list_index_get(intr_handle, 1359d61138d4SHarman Kalra ring->id); 1360d61138d4SHarman Kalra 1361778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1362778677dcSRafal Kozik ring->empty_rx_reqs[i] = i; 1363df238f84SMichal Krawczyk } 1364badc3a6aSMichal Krawczyk ctx.queue_size = ring->ring_size; 1365df238f84SMichal Krawczyk ctx.qid = ena_qid; 13664217cb0bSMichal Krawczyk ctx.numa_node = ring->numa_socket_id; 1367df238f84SMichal Krawczyk 1368df238f84SMichal Krawczyk rc = ena_com_create_io_queue(ena_dev, &ctx); 1369df238f84SMichal Krawczyk if (rc) { 13706f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1371617898d1SMichal Krawczyk "Failed to create IO queue[%d] (qid:%d), rc: %d\n", 1372df238f84SMichal Krawczyk ring->id, ena_qid, rc); 1373df238f84SMichal Krawczyk return rc; 1374df238f84SMichal Krawczyk } 1375df238f84SMichal Krawczyk 1376df238f84SMichal Krawczyk rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1377df238f84SMichal Krawczyk &ring->ena_com_io_sq, 1378df238f84SMichal Krawczyk &ring->ena_com_io_cq); 1379df238f84SMichal Krawczyk if (rc) { 13806f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1381617898d1SMichal Krawczyk "Failed to get IO queue[%d] handlers, rc: %d\n", 1382df238f84SMichal Krawczyk ring->id, rc); 1383df238f84SMichal Krawczyk ena_com_destroy_io_queue(ena_dev, ena_qid); 1384df238f84SMichal Krawczyk return rc; 1385df238f84SMichal Krawczyk } 1386df238f84SMichal Krawczyk 1387df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) 1388df238f84SMichal Krawczyk ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1389df238f84SMichal Krawczyk 13906986cdc4SMichal Krawczyk /* Start with Rx interrupts being masked. 
*/ 13916986cdc4SMichal Krawczyk if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle)) 13926986cdc4SMichal Krawczyk ena_rx_queue_intr_disable(dev, ring->id); 13936986cdc4SMichal Krawczyk 1394df238f84SMichal Krawczyk return 0; 1395df238f84SMichal Krawczyk } 1396df238f84SMichal Krawczyk 139726e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring) 1398df238f84SMichal Krawczyk { 139926e5543dSRafal Kozik struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1400df238f84SMichal Krawczyk 140126e5543dSRafal Kozik if (ring->type == ENA_RING_TYPE_RX) { 140226e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 140326e5543dSRafal Kozik ena_rx_queue_release_bufs(ring); 140426e5543dSRafal Kozik } else { 140526e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 140626e5543dSRafal Kozik ena_tx_queue_release_bufs(ring); 1407df238f84SMichal Krawczyk } 1408df238f84SMichal Krawczyk } 1409df238f84SMichal Krawczyk 141026e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev, 141126e5543dSRafal Kozik enum ena_ring_type ring_type) 141226e5543dSRafal Kozik { 1413890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 141426e5543dSRafal Kozik struct ena_ring *queues = NULL; 141526e5543dSRafal Kozik uint16_t nb_queues, i; 141626e5543dSRafal Kozik 141726e5543dSRafal Kozik if (ring_type == ENA_RING_TYPE_RX) { 141826e5543dSRafal Kozik queues = adapter->rx_ring; 141926e5543dSRafal Kozik nb_queues = dev->data->nb_rx_queues; 142026e5543dSRafal Kozik } else { 142126e5543dSRafal Kozik queues = adapter->tx_ring; 142226e5543dSRafal Kozik nb_queues = dev->data->nb_tx_queues; 142326e5543dSRafal Kozik } 142426e5543dSRafal Kozik 142526e5543dSRafal Kozik for (i = 0; i < nb_queues; ++i) 142626e5543dSRafal Kozik if (queues[i].configured) 142726e5543dSRafal Kozik ena_queue_stop(&queues[i]); 142826e5543dSRafal Kozik } 142926e5543dSRafal Kozik 14306986cdc4SMichal Krawczyk static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring) 14311173fca2SJan Medala { 1432a467e8f3SMichal Krawczyk int rc, bufs_num; 14331173fca2SJan Medala 14341173fca2SJan Medala ena_assert_msg(ring->configured == 1, 143526e5543dSRafal Kozik "Trying to start unconfigured queue\n"); 14361173fca2SJan Medala 14376986cdc4SMichal Krawczyk rc = ena_create_io_queue(dev, ring); 1438df238f84SMichal Krawczyk if (rc) { 1439617898d1SMichal Krawczyk PMD_INIT_LOG(ERR, "Failed to create IO queue\n"); 1440df238f84SMichal Krawczyk return rc; 1441df238f84SMichal Krawczyk } 1442df238f84SMichal Krawczyk 14431173fca2SJan Medala ring->next_to_clean = 0; 14441173fca2SJan Medala ring->next_to_use = 0; 14451173fca2SJan Medala 14467830e905SSolganik Alexander if (ring->type == ENA_RING_TYPE_TX) { 14477830e905SSolganik Alexander ring->tx_stats.available_desc = 1448b2b02edeSMichal Krawczyk ena_com_free_q_entries(ring->ena_com_io_sq); 14491173fca2SJan Medala return 0; 14507830e905SSolganik Alexander } 14511173fca2SJan Medala 1452a467e8f3SMichal Krawczyk bufs_num = ring->ring_size - 1; 1453a467e8f3SMichal Krawczyk rc = ena_populate_rx_queue(ring, bufs_num); 1454a467e8f3SMichal Krawczyk if (rc != bufs_num) { 145526e5543dSRafal Kozik ena_com_destroy_io_queue(&ring->adapter->ena_dev, 145626e5543dSRafal Kozik ENA_IO_RXQ_IDX(ring->id)); 1457617898d1SMichal Krawczyk PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n"); 1458241da076SRafal Kozik return ENA_COM_FAULT; 14591173fca2SJan Medala } 14604387e81cSIdo Segev /* Flush the per-core Rx buffer mempool cache, as the buffers
can be used on other 14614387e81cSIdo Segev * cores as well. 14624387e81cSIdo Segev */ 14634387e81cSIdo Segev rte_mempool_cache_flush(NULL, ring->mb_pool); 14641173fca2SJan Medala 14651173fca2SJan Medala return 0; 14661173fca2SJan Medala } 14671173fca2SJan Medala 14681173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, 14691173fca2SJan Medala uint16_t queue_idx, 14701173fca2SJan Medala uint16_t nb_desc, 14714217cb0bSMichal Krawczyk unsigned int socket_id, 147256b8b9b7SRafal Kozik const struct rte_eth_txconf *tx_conf) 14731173fca2SJan Medala { 14741173fca2SJan Medala struct ena_ring *txq = NULL; 1475890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 14761173fca2SJan Medala unsigned int i; 1477005064e5SMichal Krawczyk uint16_t dyn_thresh; 14781173fca2SJan Medala 14791173fca2SJan Medala txq = &adapter->tx_ring[queue_idx]; 14801173fca2SJan Medala 14811173fca2SJan Medala if (txq->configured) { 14826f1c9df9SStephen Hemminger PMD_DRV_LOG(CRIT, 1483617898d1SMichal Krawczyk "API violation. Queue[%d] is already configured\n", 14841173fca2SJan Medala queue_idx); 1485241da076SRafal Kozik return ENA_COM_FAULT; 14861173fca2SJan Medala } 14871173fca2SJan Medala 14881daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 14896f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1490617898d1SMichal Krawczyk "Unsupported size of Tx queue: %d is not a power of 2.\n", 14911daff526SJakub Palider nb_desc); 14921daff526SJakub Palider return -EINVAL; 14931daff526SJakub Palider } 14941daff526SJakub Palider 14955920d930SMichal Krawczyk if (nb_desc > adapter->max_tx_ring_size) { 14966f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1497617898d1SMichal Krawczyk "Unsupported size of Tx queue (max size: %d)\n", 14985920d930SMichal Krawczyk adapter->max_tx_ring_size); 14991173fca2SJan Medala return -EINVAL; 15001173fca2SJan Medala } 15011173fca2SJan Medala 15021173fca2SJan Medala txq->port_id = dev->data->port_id; 15031173fca2SJan Medala txq->next_to_clean = 0; 15041173fca2SJan Medala txq->next_to_use = 0; 15051173fca2SJan Medala txq->ring_size = nb_desc; 1506c0006061SMichal Krawczyk txq->size_mask = nb_desc - 1; 15074217cb0bSMichal Krawczyk txq->numa_socket_id = socket_id; 15081d973d8fSIgor Chauskin txq->pkts_without_db = false; 1509f93e20e5SMichal Krawczyk txq->last_cleanup_ticks = 0; 15101173fca2SJan Medala 151108180833SMichal Krawczyk txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info", 151208180833SMichal Krawczyk sizeof(struct ena_tx_buffer) * txq->ring_size, 151308180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 151408180833SMichal Krawczyk socket_id); 15151173fca2SJan Medala if (!txq->tx_buffer_info) { 1516617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1517617898d1SMichal Krawczyk "Failed to allocate memory for Tx buffer info\n"); 1518df238f84SMichal Krawczyk return -ENOMEM; 15191173fca2SJan Medala } 15201173fca2SJan Medala 152108180833SMichal Krawczyk txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs", 152208180833SMichal Krawczyk sizeof(uint16_t) * txq->ring_size, 152308180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 152408180833SMichal Krawczyk socket_id); 15251173fca2SJan Medala if (!txq->empty_tx_reqs) { 1526617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1527617898d1SMichal Krawczyk "Failed to allocate memory for empty Tx requests\n"); 1528df238f84SMichal Krawczyk rte_free(txq->tx_buffer_info); 1529df238f84SMichal Krawczyk return -ENOMEM; 15301173fca2SJan Medala } 1531241da076SRafal Kozik 15322fca2a98SMichal Krawczyk txq->push_buf_intermediate_buf = 
153308180833SMichal Krawczyk rte_zmalloc_socket("txq->push_buf_intermediate_buf", 15342fca2a98SMichal Krawczyk txq->tx_max_header_size, 153508180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 153608180833SMichal Krawczyk socket_id); 15372fca2a98SMichal Krawczyk if (!txq->push_buf_intermediate_buf) { 1538617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n"); 15392fca2a98SMichal Krawczyk rte_free(txq->tx_buffer_info); 15402fca2a98SMichal Krawczyk rte_free(txq->empty_tx_reqs); 15412fca2a98SMichal Krawczyk return -ENOMEM; 15422fca2a98SMichal Krawczyk } 15432fca2a98SMichal Krawczyk 15441173fca2SJan Medala for (i = 0; i < txq->ring_size; i++) 15451173fca2SJan Medala txq->empty_tx_reqs[i] = i; 15461173fca2SJan Medala 1547005064e5SMichal Krawczyk txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1548005064e5SMichal Krawczyk 1549005064e5SMichal Krawczyk /* Check if caller provided the Tx cleanup threshold value. */ 1550005064e5SMichal Krawczyk if (tx_conf->tx_free_thresh != 0) { 1551005064e5SMichal Krawczyk txq->tx_free_thresh = tx_conf->tx_free_thresh; 1552005064e5SMichal Krawczyk } else { 1553005064e5SMichal Krawczyk dyn_thresh = txq->ring_size - 1554005064e5SMichal Krawczyk txq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1555005064e5SMichal Krawczyk txq->tx_free_thresh = RTE_MAX(dyn_thresh, 1556005064e5SMichal Krawczyk txq->ring_size - ENA_REFILL_THRESH_PACKET); 15572081d5e2SMichal Krawczyk } 1558005064e5SMichal Krawczyk 1559f93e20e5SMichal Krawczyk txq->missing_tx_completion_threshold = 1560f93e20e5SMichal Krawczyk RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP); 1561f93e20e5SMichal Krawczyk 15621173fca2SJan Medala /* Store pointer to this queue in upper layer */ 15631173fca2SJan Medala txq->configured = 1; 15641173fca2SJan Medala dev->data->tx_queues[queue_idx] = txq; 1565241da076SRafal Kozik 1566241da076SRafal Kozik return 0; 15671173fca2SJan Medala } 15681173fca2SJan Medala 15691173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, 15701173fca2SJan Medala uint16_t queue_idx, 15711173fca2SJan Medala uint16_t nb_desc, 15724217cb0bSMichal Krawczyk unsigned int socket_id, 157334d5e97eSMichal Krawczyk const struct rte_eth_rxconf *rx_conf, 15741173fca2SJan Medala struct rte_mempool *mp) 15751173fca2SJan Medala { 1576890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 15771173fca2SJan Medala struct ena_ring *rxq = NULL; 157838364c26SMichal Krawczyk size_t buffer_size; 1579df238f84SMichal Krawczyk int i; 1580005064e5SMichal Krawczyk uint16_t dyn_thresh; 15811173fca2SJan Medala 15821173fca2SJan Medala rxq = &adapter->rx_ring[queue_idx]; 15831173fca2SJan Medala if (rxq->configured) { 15846f1c9df9SStephen Hemminger PMD_DRV_LOG(CRIT, 1585617898d1SMichal Krawczyk "API violation. 
Queue[%d] is already configured\n", 15861173fca2SJan Medala queue_idx); 1587241da076SRafal Kozik return ENA_COM_FAULT; 15881173fca2SJan Medala } 15891173fca2SJan Medala 15901daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 15916f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1592617898d1SMichal Krawczyk "Unsupported size of Rx queue: %d is not a power of 2.\n", 15931daff526SJakub Palider nb_desc); 15941daff526SJakub Palider return -EINVAL; 15951daff526SJakub Palider } 15961daff526SJakub Palider 15975920d930SMichal Krawczyk if (nb_desc > adapter->max_rx_ring_size) { 15986f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1599617898d1SMichal Krawczyk "Unsupported size of Rx queue (max size: %d)\n", 16005920d930SMichal Krawczyk adapter->max_rx_ring_size); 16011173fca2SJan Medala return -EINVAL; 16021173fca2SJan Medala } 16031173fca2SJan Medala 160438364c26SMichal Krawczyk /* ENA doesn't support buffers smaller than 1400 bytes */ 160538364c26SMichal Krawczyk buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 160638364c26SMichal Krawczyk if (buffer_size < ENA_RX_BUF_MIN_SIZE) { 160738364c26SMichal Krawczyk PMD_DRV_LOG(ERR, 1608617898d1SMichal Krawczyk "Unsupported size of Rx buffer: %zu (min size: %d)\n", 160938364c26SMichal Krawczyk buffer_size, ENA_RX_BUF_MIN_SIZE); 161038364c26SMichal Krawczyk return -EINVAL; 161138364c26SMichal Krawczyk } 161238364c26SMichal Krawczyk 16131173fca2SJan Medala rxq->port_id = dev->data->port_id; 16141173fca2SJan Medala rxq->next_to_clean = 0; 16151173fca2SJan Medala rxq->next_to_use = 0; 16161173fca2SJan Medala rxq->ring_size = nb_desc; 1617c0006061SMichal Krawczyk rxq->size_mask = nb_desc - 1; 16184217cb0bSMichal Krawczyk rxq->numa_socket_id = socket_id; 16191173fca2SJan Medala rxq->mb_pool = mp; 16201173fca2SJan Medala 162108180833SMichal Krawczyk rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info", 16221be097dcSMichal Krawczyk sizeof(struct ena_rx_buffer) * nb_desc, 162308180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 162408180833SMichal Krawczyk socket_id); 16251173fca2SJan Medala if (!rxq->rx_buffer_info) { 1626617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1627617898d1SMichal Krawczyk "Failed to allocate memory for Rx buffer info\n"); 16281173fca2SJan Medala return -ENOMEM; 16291173fca2SJan Medala } 16301173fca2SJan Medala 163108180833SMichal Krawczyk rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer", 163279405ee1SRafal Kozik sizeof(struct rte_mbuf *) * nb_desc, 163308180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 163408180833SMichal Krawczyk socket_id); 163579405ee1SRafal Kozik if (!rxq->rx_refill_buffer) { 1636617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1637617898d1SMichal Krawczyk "Failed to allocate memory for Rx refill buffer\n"); 163879405ee1SRafal Kozik rte_free(rxq->rx_buffer_info); 163979405ee1SRafal Kozik rxq->rx_buffer_info = NULL; 164079405ee1SRafal Kozik return -ENOMEM; 164179405ee1SRafal Kozik } 164279405ee1SRafal Kozik 164308180833SMichal Krawczyk rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs", 1644c2034976SMichal Krawczyk sizeof(uint16_t) * nb_desc, 164508180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 164608180833SMichal Krawczyk socket_id); 1647c2034976SMichal Krawczyk if (!rxq->empty_rx_reqs) { 1648617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1649617898d1SMichal Krawczyk "Failed to allocate memory for empty Rx requests\n"); 1650c2034976SMichal Krawczyk rte_free(rxq->rx_buffer_info); 1651c2034976SMichal Krawczyk rxq->rx_buffer_info = NULL; 165279405ee1SRafal Kozik rte_free(rxq->rx_refill_buffer);
165379405ee1SRafal Kozik rxq->rx_refill_buffer = NULL; 1654c2034976SMichal Krawczyk return -ENOMEM; 1655c2034976SMichal Krawczyk } 1656c2034976SMichal Krawczyk 1657c2034976SMichal Krawczyk for (i = 0; i < nb_desc; i++) 1658eccbe2ffSRafal Kozik rxq->empty_rx_reqs[i] = i; 1659c2034976SMichal Krawczyk 166034d5e97eSMichal Krawczyk rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 166134d5e97eSMichal Krawczyk 1662005064e5SMichal Krawczyk if (rx_conf->rx_free_thresh != 0) { 1663005064e5SMichal Krawczyk rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1664005064e5SMichal Krawczyk } else { 1665005064e5SMichal Krawczyk dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1666005064e5SMichal Krawczyk rxq->rx_free_thresh = RTE_MIN(dyn_thresh, 1667005064e5SMichal Krawczyk (uint16_t)(ENA_REFILL_THRESH_PACKET)); 1668005064e5SMichal Krawczyk } 1669005064e5SMichal Krawczyk 16701173fca2SJan Medala /* Store pointer to this queue in upper layer */ 16711173fca2SJan Medala rxq->configured = 1; 16721173fca2SJan Medala dev->data->rx_queues[queue_idx] = rxq; 16731173fca2SJan Medala 1674df238f84SMichal Krawczyk return 0; 16751173fca2SJan Medala } 16761173fca2SJan Medala 167783fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 167883fd97b2SMichal Krawczyk struct rte_mbuf *mbuf, uint16_t id) 167983fd97b2SMichal Krawczyk { 168083fd97b2SMichal Krawczyk struct ena_com_buf ebuf; 168183fd97b2SMichal Krawczyk int rc; 168283fd97b2SMichal Krawczyk 168383fd97b2SMichal Krawczyk /* prepare physical address for DMA transaction */ 168483fd97b2SMichal Krawczyk ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 168583fd97b2SMichal Krawczyk ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 168683fd97b2SMichal Krawczyk 168783fd97b2SMichal Krawczyk /* pass resource to device */ 168883fd97b2SMichal Krawczyk rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); 168983fd97b2SMichal Krawczyk if (unlikely(rc != 0)) 16900a001d69SMichal Krawczyk PMD_RX_LOG(WARNING, "Failed adding Rx desc\n"); 169183fd97b2SMichal Krawczyk 169283fd97b2SMichal Krawczyk return rc; 169383fd97b2SMichal Krawczyk } 169483fd97b2SMichal Krawczyk 16951173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 16961173fca2SJan Medala { 16971173fca2SJan Medala unsigned int i; 16981173fca2SJan Medala int rc; 16991daff526SJakub Palider uint16_t next_to_use = rxq->next_to_use; 17000a001d69SMichal Krawczyk uint16_t req_id; 17010a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX 17020a001d69SMichal Krawczyk uint16_t in_use; 17030a001d69SMichal Krawczyk #endif 170479405ee1SRafal Kozik struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 17051173fca2SJan Medala 17061173fca2SJan Medala if (unlikely(!count)) 17071173fca2SJan Medala return 0; 17081173fca2SJan Medala 17090a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX 1710c0006061SMichal Krawczyk in_use = rxq->ring_size - 1 - 1711c0006061SMichal Krawczyk ena_com_free_q_entries(rxq->ena_com_io_sq); 17120a001d69SMichal Krawczyk if (unlikely((in_use + count) >= rxq->ring_size)) 17130a001d69SMichal Krawczyk PMD_RX_LOG(ERR, "Bad Rx ring state\n"); 17140a001d69SMichal Krawczyk #endif 17151173fca2SJan Medala 17161173fca2SJan Medala /* get resources for incoming packets */ 17173c8bc29fSDavid Harton rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count); 17181173fca2SJan Medala if (unlikely(rc < 0)) { 17191173fca2SJan Medala rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 17207830e905SSolganik Alexander ++rxq->rx_stats.mbuf_alloc_fail; 
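/*
 * Editor's note (sketch based on the surrounding code, not in the original
 * source): an mbuf shortage here is not fatal. ena_populate_rx_queue()
 * simply returns 0, the Rx ring keeps its current fill level, and the
 * refill is retried the next time the Rx path invokes this function, while
 * rx_nombuf and mbuf_alloc_fail account for the event.
 */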
1721617898d1SMichal Krawczyk PMD_RX_LOG(DEBUG, "There are not enough free buffers\n"); 17221173fca2SJan Medala return 0; 17231173fca2SJan Medala } 17241173fca2SJan Medala 17251173fca2SJan Medala for (i = 0; i < count; i++) { 172679405ee1SRafal Kozik struct rte_mbuf *mbuf = mbufs[i]; 17271be097dcSMichal Krawczyk struct ena_rx_buffer *rx_info; 17281173fca2SJan Medala 172979405ee1SRafal Kozik if (likely((i + 4) < count)) 173079405ee1SRafal Kozik rte_prefetch0(mbufs[i + 4]); 1731c2034976SMichal Krawczyk 1732c0006061SMichal Krawczyk req_id = rxq->empty_rx_reqs[next_to_use]; 17331be097dcSMichal Krawczyk rx_info = &rxq->rx_buffer_info[req_id]; 1734241da076SRafal Kozik 173583fd97b2SMichal Krawczyk rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); 173683fd97b2SMichal Krawczyk if (unlikely(rc != 0)) 17371173fca2SJan Medala break; 173883fd97b2SMichal Krawczyk 17391be097dcSMichal Krawczyk rx_info->mbuf = mbuf; 1740c0006061SMichal Krawczyk next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); 17411173fca2SJan Medala } 17421173fca2SJan Medala 174379405ee1SRafal Kozik if (unlikely(i < count)) { 17440a001d69SMichal Krawczyk PMD_RX_LOG(WARNING, 1745617898d1SMichal Krawczyk "Refilled Rx queue[%d] with only %d/%d buffers\n", 1746617898d1SMichal Krawczyk rxq->id, i, count); 17473c8bc29fSDavid Harton rte_pktmbuf_free_bulk(&mbufs[i], count - i); 17487830e905SSolganik Alexander ++rxq->rx_stats.refill_partial; 174979405ee1SRafal Kozik } 1750241da076SRafal Kozik 17517be78d02SJosh Soref /* When we submitted free resources to device... */ 17523d19e1abSRafal Kozik if (likely(i > 0)) { 175338faa87eSMichal Krawczyk /* ...let HW know that it can fill buffers with data. */ 17541173fca2SJan Medala ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 17551173fca2SJan Medala 17565e02e19eSJan Medala rxq->next_to_use = next_to_use; 17575e02e19eSJan Medala } 17585e02e19eSJan Medala 17591173fca2SJan Medala return i; 17601173fca2SJan Medala } 17611173fca2SJan Medala 176292401abfSShai Brandes static size_t ena_get_metrics_entries(struct ena_adapter *adapter) 176392401abfSShai Brandes { 176492401abfSShai Brandes struct ena_com_dev *ena_dev = &adapter->ena_dev; 176592401abfSShai Brandes size_t metrics_num = 0; 176692401abfSShai Brandes 176792401abfSShai Brandes if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) 176892401abfSShai Brandes metrics_num = ENA_STATS_ARRAY_METRICS; 176992401abfSShai Brandes else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) 177092401abfSShai Brandes metrics_num = ENA_STATS_ARRAY_METRICS_LEGACY; 177192401abfSShai Brandes PMD_DRV_LOG(NOTICE, "0x%x customer metrics are supported\n", (unsigned int)metrics_num); 177292401abfSShai Brandes if (metrics_num > ENA_MAX_CUSTOMER_METRICS) { 177392401abfSShai Brandes PMD_DRV_LOG(NOTICE, "Not enough space for the requested customer metrics\n"); 177492401abfSShai Brandes metrics_num = ENA_MAX_CUSTOMER_METRICS; 177592401abfSShai Brandes } 177692401abfSShai Brandes return metrics_num; 177792401abfSShai Brandes } 177892401abfSShai Brandes 1779b9b05d6fSMichal Krawczyk static int ena_device_init(struct ena_adapter *adapter, 1780aab58857SStanislaw Kardach struct rte_pci_device *pdev, 1781b9b05d6fSMichal Krawczyk struct ena_com_dev_get_features_ctx *get_feat_ctx) 17821173fca2SJan Medala { 1783b9b05d6fSMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 1784ca148440SMichal Krawczyk uint32_t aenq_groups; 17851173fca2SJan Medala int rc; 1786c4144557SJan Medala bool readless_supported; 17871173fca2SJan Medala 17881173fca2SJan Medala /* 
Initialize mmio registers */ 17891173fca2SJan Medala rc = ena_com_mmio_reg_read_request_init(ena_dev); 17901173fca2SJan Medala if (rc) { 1791617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n"); 17921173fca2SJan Medala return rc; 17931173fca2SJan Medala } 17941173fca2SJan Medala 1795c4144557SJan Medala /* The PCIe configuration space revision id indicates whether mmio reg 1796c4144557SJan Medala * read is disabled. 1797c4144557SJan Medala */ 1798aab58857SStanislaw Kardach readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ); 1799c4144557SJan Medala ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1800c4144557SJan Medala 18011173fca2SJan Medala /* reset device */ 18023adcba9aSMichal Krawczyk rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 18031173fca2SJan Medala if (rc) { 1804617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Cannot reset device\n"); 18051173fca2SJan Medala goto err_mmio_read_less; 18061173fca2SJan Medala } 18071173fca2SJan Medala 18081173fca2SJan Medala /* check FW version */ 18091173fca2SJan Medala rc = ena_com_validate_version(ena_dev); 18101173fca2SJan Medala if (rc) { 1811617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Device version is too low\n"); 18121173fca2SJan Medala goto err_mmio_read_less; 18131173fca2SJan Medala } 18141173fca2SJan Medala 18151173fca2SJan Medala ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 18161173fca2SJan Medala 18171173fca2SJan Medala /* ENA device administration layer init */ 1818b68309beSRafal Kozik rc = ena_com_admin_init(ena_dev, &aenq_handlers); 18191173fca2SJan Medala if (rc) { 18206f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1821617898d1SMichal Krawczyk "Cannot initialize ENA admin queue\n"); 18221173fca2SJan Medala goto err_mmio_read_less; 18231173fca2SJan Medala } 18241173fca2SJan Medala 18251173fca2SJan Medala /* To enable the msix interrupts the driver needs to know the number 18261173fca2SJan Medala * of queues. So the driver uses polling mode to retrieve this 18271173fca2SJan Medala * information.
18281173fca2SJan Medala */ 18291173fca2SJan Medala ena_com_set_admin_polling_mode(ena_dev, true); 18301173fca2SJan Medala 1831201ff2e5SJakub Palider ena_config_host_info(ena_dev); 1832201ff2e5SJakub Palider 18331173fca2SJan Medala /* Get Device Attributes and features */ 18341173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 18351173fca2SJan Medala if (rc) { 18366f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1837617898d1SMichal Krawczyk "Cannot get attribute for ENA device, rc: %d\n", rc); 18381173fca2SJan Medala goto err_admin_init; 18391173fca2SJan Medala } 18401173fca2SJan Medala 1841f01f060cSRafal Kozik aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1842d9b8b106SMichal Krawczyk BIT(ENA_ADMIN_NOTIFICATION) | 1843983cce2dSRafal Kozik BIT(ENA_ADMIN_KEEP_ALIVE) | 1844983cce2dSRafal Kozik BIT(ENA_ADMIN_FATAL_ERROR) | 1845983cce2dSRafal Kozik BIT(ENA_ADMIN_WARNING); 1846ca148440SMichal Krawczyk 1847ca148440SMichal Krawczyk aenq_groups &= get_feat_ctx->aenq.supported_groups; 1848ca148440SMichal Krawczyk 1849b9b05d6fSMichal Krawczyk adapter->all_aenq_groups = aenq_groups; 185092401abfSShai Brandes /* The actual supported number of metrics is negotiated with the device at runtime */ 185192401abfSShai Brandes adapter->metrics_num = ena_get_metrics_entries(adapter); 1852e859d2b8SRafal Kozik 18531173fca2SJan Medala return 0; 18541173fca2SJan Medala 18551173fca2SJan Medala err_admin_init: 18561173fca2SJan Medala ena_com_admin_destroy(ena_dev); 18571173fca2SJan Medala 18581173fca2SJan Medala err_mmio_read_less: 18591173fca2SJan Medala ena_com_mmio_reg_read_request_destroy(ena_dev); 18601173fca2SJan Medala 18611173fca2SJan Medala return rc; 18621173fca2SJan Medala } 18631173fca2SJan Medala 1864ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg) 186515773e06SMichal Krawczyk { 1866aab58857SStanislaw Kardach struct rte_eth_dev *dev = cb_arg; 1867aab58857SStanislaw Kardach struct ena_adapter *adapter = dev->data->dev_private; 186815773e06SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 186915773e06SMichal Krawczyk 187015773e06SMichal Krawczyk ena_com_admin_q_comp_intr_handler(ena_dev); 18713d19e1abSRafal Kozik if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1872aab58857SStanislaw Kardach ena_com_aenq_intr_handler(ena_dev, dev); 187315773e06SMichal Krawczyk } 187415773e06SMichal Krawczyk 18755efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter) 18765efb9fc7SMichal Krawczyk { 1877b9b05d6fSMichal Krawczyk if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE))) 1878e859d2b8SRafal Kozik return; 1879e859d2b8SRafal Kozik 18805efb9fc7SMichal Krawczyk if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 18815efb9fc7SMichal Krawczyk return; 18825efb9fc7SMichal Krawczyk 18835efb9fc7SMichal Krawczyk if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 18845efb9fc7SMichal Krawczyk adapter->keep_alive_timeout)) { 18856f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 18862bae75eaSDawid Gorecki ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 18877830e905SSolganik Alexander ++adapter->dev_stats.wd_expired; 18885efb9fc7SMichal Krawczyk } 18895efb9fc7SMichal Krawczyk } 18905efb9fc7SMichal Krawczyk 18915efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */ 18925efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter) 18935efb9fc7SMichal Krawczyk { 18945efb9fc7SMichal Krawczyk if 
(unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1895617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); 18962bae75eaSDawid Gorecki ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 18975efb9fc7SMichal Krawczyk } 18985efb9fc7SMichal Krawczyk } 18995efb9fc7SMichal Krawczyk 1900f93e20e5SMichal Krawczyk static int check_for_tx_completion_in_queue(struct ena_adapter *adapter, 1901f93e20e5SMichal Krawczyk struct ena_ring *tx_ring) 1902f93e20e5SMichal Krawczyk { 1903f93e20e5SMichal Krawczyk struct ena_tx_buffer *tx_buf; 1904f93e20e5SMichal Krawczyk uint64_t timestamp; 1905f93e20e5SMichal Krawczyk uint64_t completion_delay; 1906f93e20e5SMichal Krawczyk uint32_t missed_tx = 0; 1907f93e20e5SMichal Krawczyk unsigned int i; 1908f93e20e5SMichal Krawczyk int rc = 0; 1909f93e20e5SMichal Krawczyk 1910f93e20e5SMichal Krawczyk for (i = 0; i < tx_ring->ring_size; ++i) { 1911f93e20e5SMichal Krawczyk tx_buf = &tx_ring->tx_buffer_info[i]; 1912f93e20e5SMichal Krawczyk timestamp = tx_buf->timestamp; 1913f93e20e5SMichal Krawczyk 1914f93e20e5SMichal Krawczyk if (timestamp == 0) 1915f93e20e5SMichal Krawczyk continue; 1916f93e20e5SMichal Krawczyk 1917f93e20e5SMichal Krawczyk completion_delay = rte_get_timer_cycles() - timestamp; 1918f93e20e5SMichal Krawczyk if (completion_delay > adapter->missing_tx_completion_to) { 1919f93e20e5SMichal Krawczyk if (unlikely(!tx_buf->print_once)) { 1920f93e20e5SMichal Krawczyk PMD_TX_LOG(WARNING, 1921f93e20e5SMichal Krawczyk "Found a Tx that wasn't completed on time, qid %d, index %d. " 1922f93e20e5SMichal Krawczyk "Missing Tx outstanding for %" PRIu64 " msecs.\n", 1923f93e20e5SMichal Krawczyk tx_ring->id, i, completion_delay / 1924f93e20e5SMichal Krawczyk rte_get_timer_hz() * 1000); 1925f93e20e5SMichal Krawczyk tx_buf->print_once = true; 1926f93e20e5SMichal Krawczyk } 1927f93e20e5SMichal Krawczyk ++missed_tx; 1928f93e20e5SMichal Krawczyk } 1929f93e20e5SMichal Krawczyk } 1930f93e20e5SMichal Krawczyk 1931f93e20e5SMichal Krawczyk if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) { 1932f93e20e5SMichal Krawczyk PMD_DRV_LOG(ERR, 1933f93e20e5SMichal Krawczyk "The number of lost Tx completions is above the threshold (%d > %d). 
" 1934f93e20e5SMichal Krawczyk "Trigger the device reset.\n", 1935f93e20e5SMichal Krawczyk missed_tx, 1936f93e20e5SMichal Krawczyk tx_ring->missing_tx_completion_threshold); 1937f93e20e5SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 1938f93e20e5SMichal Krawczyk adapter->trigger_reset = true; 1939f93e20e5SMichal Krawczyk rc = -EIO; 1940f93e20e5SMichal Krawczyk } 1941f93e20e5SMichal Krawczyk 1942f93e20e5SMichal Krawczyk tx_ring->tx_stats.missed_tx += missed_tx; 1943f93e20e5SMichal Krawczyk 1944f93e20e5SMichal Krawczyk return rc; 1945f93e20e5SMichal Krawczyk } 1946f93e20e5SMichal Krawczyk 1947f93e20e5SMichal Krawczyk static void check_for_tx_completions(struct ena_adapter *adapter) 1948f93e20e5SMichal Krawczyk { 1949f93e20e5SMichal Krawczyk struct ena_ring *tx_ring; 1950f93e20e5SMichal Krawczyk uint64_t tx_cleanup_delay; 1951f93e20e5SMichal Krawczyk size_t qid; 1952f93e20e5SMichal Krawczyk int budget; 1953f93e20e5SMichal Krawczyk uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues; 1954f93e20e5SMichal Krawczyk 1955f93e20e5SMichal Krawczyk if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 1956f93e20e5SMichal Krawczyk return; 1957f93e20e5SMichal Krawczyk 1958f93e20e5SMichal Krawczyk nb_tx_queues = adapter->edev_data->nb_tx_queues; 1959f93e20e5SMichal Krawczyk budget = adapter->missing_tx_completion_budget; 1960f93e20e5SMichal Krawczyk 1961f93e20e5SMichal Krawczyk qid = adapter->last_tx_comp_qid; 1962f93e20e5SMichal Krawczyk while (budget-- > 0) { 1963f93e20e5SMichal Krawczyk tx_ring = &adapter->tx_ring[qid]; 1964f93e20e5SMichal Krawczyk 1965f93e20e5SMichal Krawczyk /* Tx cleanup is called only by the burst function and can be 1966f93e20e5SMichal Krawczyk * called dynamically by the application. Also cleanup is 1967f93e20e5SMichal Krawczyk * limited by the threshold. To avoid false detection of the 1968f93e20e5SMichal Krawczyk * missing HW Tx completion, get the delay since last cleanup 1969f93e20e5SMichal Krawczyk * function was called. 
1970f93e20e5SMichal Krawczyk */ 1971f93e20e5SMichal Krawczyk tx_cleanup_delay = rte_get_timer_cycles() - 1972f93e20e5SMichal Krawczyk tx_ring->last_cleanup_ticks; 1973f93e20e5SMichal Krawczyk if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay) 1974f93e20e5SMichal Krawczyk check_for_tx_completion_in_queue(adapter, tx_ring); 1975f93e20e5SMichal Krawczyk qid = (qid + 1) % nb_tx_queues; 1976f93e20e5SMichal Krawczyk } 1977f93e20e5SMichal Krawczyk 1978f93e20e5SMichal Krawczyk adapter->last_tx_comp_qid = qid; 1979f93e20e5SMichal Krawczyk } 1980f93e20e5SMichal Krawczyk 1981d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1982d9b8b106SMichal Krawczyk void *arg) 1983d9b8b106SMichal Krawczyk { 1984aab58857SStanislaw Kardach struct rte_eth_dev *dev = arg; 1985aab58857SStanislaw Kardach struct ena_adapter *adapter = dev->data->dev_private; 1986d9b8b106SMichal Krawczyk 1987e2174a54SMichal Krawczyk if (unlikely(adapter->trigger_reset)) 1988e2174a54SMichal Krawczyk return; 1989e2174a54SMichal Krawczyk 19905efb9fc7SMichal Krawczyk check_for_missing_keep_alive(adapter); 19915efb9fc7SMichal Krawczyk check_for_admin_com_state(adapter); 1992f93e20e5SMichal Krawczyk check_for_tx_completions(adapter); 1993d9b8b106SMichal Krawczyk 19945efb9fc7SMichal Krawczyk if (unlikely(adapter->trigger_reset)) { 19956f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 19965723fbedSFerruh Yigit rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1997d9b8b106SMichal Krawczyk NULL); 1998d9b8b106SMichal Krawczyk } 1999d9b8b106SMichal Krawczyk } 2000d9b8b106SMichal Krawczyk 20012fca2a98SMichal Krawczyk static inline void 20028a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config, 20038a7a73f2SMichal Krawczyk struct ena_admin_feature_llq_desc *llq, 20048a7a73f2SMichal Krawczyk bool use_large_llq_hdr) 20052fca2a98SMichal Krawczyk { 20062fca2a98SMichal Krawczyk llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 20072fca2a98SMichal Krawczyk llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 20082fca2a98SMichal Krawczyk llq_config->llq_num_decs_before_header = 20092fca2a98SMichal Krawczyk ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 20108a7a73f2SMichal Krawczyk 20118a7a73f2SMichal Krawczyk if (use_large_llq_hdr && 20128a7a73f2SMichal Krawczyk (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 20138a7a73f2SMichal Krawczyk llq_config->llq_ring_entry_size = 20148a7a73f2SMichal Krawczyk ENA_ADMIN_LIST_ENTRY_SIZE_256B; 20158a7a73f2SMichal Krawczyk llq_config->llq_ring_entry_size_value = 256; 20168a7a73f2SMichal Krawczyk } else { 20178a7a73f2SMichal Krawczyk llq_config->llq_ring_entry_size = 20188a7a73f2SMichal Krawczyk ENA_ADMIN_LIST_ENTRY_SIZE_128B; 20192fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size_value = 128; 20202fca2a98SMichal Krawczyk } 20218a7a73f2SMichal Krawczyk } 20222fca2a98SMichal Krawczyk 20232fca2a98SMichal Krawczyk static int 20242fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter, 20252fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev, 20262fca2a98SMichal Krawczyk struct ena_admin_feature_llq_desc *llq, 20272fca2a98SMichal Krawczyk struct ena_llq_configurations *llq_default_configurations) 20282fca2a98SMichal Krawczyk { 20292fca2a98SMichal Krawczyk int rc; 20302fca2a98SMichal Krawczyk u32 llq_feature_mask; 20312fca2a98SMichal Krawczyk 20329944919eSMichal Krawczyk if (!adapter->enable_llq) { 20339944919eSMichal Krawczyk 
PMD_DRV_LOG(WARNING, 20349944919eSMichal Krawczyk "NOTE: LLQ has been disabled as per user's request. " 20359944919eSMichal Krawczyk "This may lead to a huge performance degradation!\n"); 20369944919eSMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 20379944919eSMichal Krawczyk return 0; 20389944919eSMichal Krawczyk } 20399944919eSMichal Krawczyk 20402fca2a98SMichal Krawczyk llq_feature_mask = 1 << ENA_ADMIN_LLQ; 20412fca2a98SMichal Krawczyk if (!(ena_dev->supported_features & llq_feature_mask)) { 20426f1c9df9SStephen Hemminger PMD_DRV_LOG(INFO, 20432fca2a98SMichal Krawczyk "LLQ is not supported. Fallback to host mode policy.\n"); 20442fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 20452fca2a98SMichal Krawczyk return 0; 20462fca2a98SMichal Krawczyk } 20472fca2a98SMichal Krawczyk 20489ae7a13fSDawid Gorecki if (adapter->dev_mem_base == NULL) { 20499ae7a13fSDawid Gorecki PMD_DRV_LOG(ERR, 20509ae7a13fSDawid Gorecki "LLQ is advertised as supported, but device doesn't expose mem bar\n"); 20519ae7a13fSDawid Gorecki ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 20529ae7a13fSDawid Gorecki return 0; 20539ae7a13fSDawid Gorecki } 20549ae7a13fSDawid Gorecki 20552fca2a98SMichal Krawczyk rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 20562fca2a98SMichal Krawczyk if (unlikely(rc)) { 2057617898d1SMichal Krawczyk PMD_INIT_LOG(WARNING, 2058617898d1SMichal Krawczyk "Failed to config dev mode. Fallback to host mode policy.\n"); 20592fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 20602fca2a98SMichal Krawczyk return 0; 20612fca2a98SMichal Krawczyk } 20622fca2a98SMichal Krawczyk 20632fca2a98SMichal Krawczyk /* Nothing to config, exit */ 20642fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 20652fca2a98SMichal Krawczyk return 0; 20662fca2a98SMichal Krawczyk 20672fca2a98SMichal Krawczyk ena_dev->mem_bar = adapter->dev_mem_base; 20682fca2a98SMichal Krawczyk 20692fca2a98SMichal Krawczyk return 0; 20702fca2a98SMichal Krawczyk } 20712fca2a98SMichal Krawczyk 20725920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev, 207301bd6877SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx) 207401bd6877SRafal Kozik { 20755920d930SMichal Krawczyk uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 207601bd6877SRafal Kozik 2077ea93d37eSRafal Kozik /* Regular queues capabilities */ 2078ea93d37eSRafal Kozik if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2079ea93d37eSRafal Kozik struct ena_admin_queue_ext_feature_fields *max_queue_ext = 2080ea93d37eSRafal Kozik &get_feat_ctx->max_queue_ext.max_queue_ext; 20812fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 20822fca2a98SMichal Krawczyk max_queue_ext->max_rx_cq_num); 20832fca2a98SMichal Krawczyk io_tx_sq_num = max_queue_ext->max_tx_sq_num; 20842fca2a98SMichal Krawczyk io_tx_cq_num = max_queue_ext->max_tx_cq_num; 2085ea93d37eSRafal Kozik } else { 2086ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 2087ea93d37eSRafal Kozik &get_feat_ctx->max_queues; 20882fca2a98SMichal Krawczyk io_tx_sq_num = max_queues->max_sq_num; 20892fca2a98SMichal Krawczyk io_tx_cq_num = max_queues->max_cq_num; 20902fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 2091ea93d37eSRafal Kozik } 209201bd6877SRafal Kozik 20932fca2a98SMichal Krawczyk /* In case of LLQ use the llq 
number in the get feature cmd */ 20942fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 20952fca2a98SMichal Krawczyk io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 20962fca2a98SMichal Krawczyk 20975920d930SMichal Krawczyk max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 20985920d930SMichal Krawczyk max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num); 20995920d930SMichal Krawczyk max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num); 210001bd6877SRafal Kozik 21015920d930SMichal Krawczyk if (unlikely(max_num_io_queues == 0)) { 2102617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n"); 210301bd6877SRafal Kozik return -EFAULT; 210401bd6877SRafal Kozik } 210501bd6877SRafal Kozik 21065920d930SMichal Krawczyk return max_num_io_queues; 210701bd6877SRafal Kozik } 210801bd6877SRafal Kozik 2109e8c838fdSMichal Krawczyk static void 2110e8c838fdSMichal Krawczyk ena_set_offloads(struct ena_offloads *offloads, 2111e8c838fdSMichal Krawczyk struct ena_admin_feature_offload_desc *offload_desc) 2112e8c838fdSMichal Krawczyk { 2113e8c838fdSMichal Krawczyk if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 2114e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_IPV4_TSO; 2115e8c838fdSMichal Krawczyk 2116e8c838fdSMichal Krawczyk /* Tx IPv4 checksum offloads */ 2117e8c838fdSMichal Krawczyk if (offload_desc->tx & 2118e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) 2119e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L3_IPV4_CSUM; 2120e8c838fdSMichal Krawczyk if (offload_desc->tx & 2121e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) 2122e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV4_CSUM; 2123e8c838fdSMichal Krawczyk if (offload_desc->tx & 2124e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 2125e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL; 2126e8c838fdSMichal Krawczyk 2127e8c838fdSMichal Krawczyk /* Tx IPv6 checksum offloads */ 2128e8c838fdSMichal Krawczyk if (offload_desc->tx & 2129e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) 2130e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV6_CSUM; 2131e8c838fdSMichal Krawczyk if (offload_desc->tx & 2132e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) 2133e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL; 2134e8c838fdSMichal Krawczyk 2135e8c838fdSMichal Krawczyk /* Rx IPv4 checksum offloads */ 2136e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2137e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK) 2138e8c838fdSMichal Krawczyk offloads->rx_offloads |= ENA_L3_IPV4_CSUM; 2139e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2140e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 2141e8c838fdSMichal Krawczyk offloads->rx_offloads |= ENA_L4_IPV4_CSUM; 2142e8c838fdSMichal Krawczyk 2143e8c838fdSMichal Krawczyk /* Rx IPv6 checksum offloads */ 2144e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2145e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) 2146e8c838fdSMichal Krawczyk offloads->rx_offloads |= ENA_L4_IPV6_CSUM; 2147e8c838fdSMichal Krawczyk 2148e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2149e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) 2150e8c838fdSMichal Krawczyk
offloads->rx_offloads |= ENA_RX_RSS_HASH; 2151e8c838fdSMichal Krawczyk } 2152e8c838fdSMichal Krawczyk 2153e3595539SStanislaw Kardach static int ena_init_once(void) 2154e3595539SStanislaw Kardach { 2155e3595539SStanislaw Kardach static bool init_done; 2156e3595539SStanislaw Kardach 2157e3595539SStanislaw Kardach if (init_done) 2158e3595539SStanislaw Kardach return 0; 2159e3595539SStanislaw Kardach 2160e3595539SStanislaw Kardach if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2161e3595539SStanislaw Kardach /* Init timer subsystem for the ENA timer service. */ 2162e3595539SStanislaw Kardach rte_timer_subsystem_init(); 2163e3595539SStanislaw Kardach /* Register handler for requests from secondary processes. */ 2164e3595539SStanislaw Kardach rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle); 2165e3595539SStanislaw Kardach } 2166e3595539SStanislaw Kardach 2167e3595539SStanislaw Kardach init_done = true; 2168e3595539SStanislaw Kardach return 0; 2169e3595539SStanislaw Kardach } 2170e3595539SStanislaw Kardach 21711173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 21721173fca2SJan Medala { 2173ea93d37eSRafal Kozik struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 21741173fca2SJan Medala struct rte_pci_device *pci_dev; 2175eb0ef49dSMichal Krawczyk struct rte_intr_handle *intr_handle; 2176890728ffSStephen Hemminger struct ena_adapter *adapter = eth_dev->data->dev_private; 21771173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 21781173fca2SJan Medala struct ena_com_dev_get_features_ctx get_feat_ctx; 21792fca2a98SMichal Krawczyk struct ena_llq_configurations llq_config; 21802fca2a98SMichal Krawczyk const char *queue_type_str; 21815920d930SMichal Krawczyk uint32_t max_num_io_queues; 2182ea93d37eSRafal Kozik int rc; 21831173fca2SJan Medala static int adapters_found; 218433dde075SMichal Krawczyk bool disable_meta_caching; 21851173fca2SJan Medala 21861173fca2SJan Medala eth_dev->dev_ops = &ena_dev_ops; 21871173fca2SJan Medala eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 21881173fca2SJan Medala eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 2189b3fc5a1aSKonstantin Ananyev eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 21901173fca2SJan Medala 2191e3595539SStanislaw Kardach rc = ena_init_once(); 2192e3595539SStanislaw Kardach if (rc != 0) 2193e3595539SStanislaw Kardach return rc; 2194e3595539SStanislaw Kardach 21951173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 21961173fca2SJan Medala return 0; 21971173fca2SJan Medala 2198f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2199f30e69b4SFerruh Yigit 2200fd976890SMichal Krawczyk memset(adapter, 0, sizeof(struct ena_adapter)); 2201fd976890SMichal Krawczyk ena_dev = &adapter->ena_dev; 2202fd976890SMichal Krawczyk 2203aab58857SStanislaw Kardach adapter->edev_data = eth_dev->data; 2204fd976890SMichal Krawczyk 2205c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 22061173fca2SJan Medala 22072fc03b23SThomas Monjalon PMD_INIT_LOG(INFO, "Initializing " PCI_PRI_FMT "\n", 22081173fca2SJan Medala pci_dev->addr.domain, 22091173fca2SJan Medala pci_dev->addr.bus, 22101173fca2SJan Medala pci_dev->addr.devid, 22111173fca2SJan Medala pci_dev->addr.function); 22121173fca2SJan Medala 2213d61138d4SHarman Kalra intr_handle = pci_dev->intr_handle; 2214eb0ef49dSMichal Krawczyk 22151173fca2SJan Medala adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 22161173fca2SJan Medala adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 22171173fca2SJan Medala 
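	/*
	 * Note on the two BARs used above: ENA_REGS_BAR exposes the device
	 * registers checked right below and used for admin/MMIO register
	 * reads, while ENA_MEM_BAR (dev_mem_base) backs the LLQ push memory
	 * that ena_set_queues_placement_policy() assigns to ena_dev->mem_bar
	 * when the low-latency queue placement policy is kept.
	 */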
22181d339597SRafal Kozik 	if (!adapter->regs) {
2219617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
22201173fca2SJan Medala 			     ENA_REGS_BAR);
22211d339597SRafal Kozik 		return -ENXIO;
22221d339597SRafal Kozik 	}
22231173fca2SJan Medala 
22241173fca2SJan Medala 	ena_dev->reg_bar = adapter->regs;
2225850e1bb1SMichal Krawczyk 	/* Pass the device data as a pointer that can be handed to the IO
2226850e1bb1SMichal Krawczyk 	 * functions by the ena_com layer (e.g. for memory allocation).
2227850e1bb1SMichal Krawczyk 	 */
2228850e1bb1SMichal Krawczyk 	ena_dev->dmadev = eth_dev->data;
22291173fca2SJan Medala 
22301173fca2SJan Medala 	adapter->id_number = adapters_found;
22311173fca2SJan Medala 
22321173fca2SJan Medala 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
22331173fca2SJan Medala 		 adapter->id_number);
22341173fca2SJan Medala 
22359944919eSMichal Krawczyk 	/* Assign default devargs values */
2236cc0c5d25SMichal Krawczyk 	adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
22379944919eSMichal Krawczyk 	adapter->enable_llq = true;
22389944919eSMichal Krawczyk 	adapter->use_large_llq_hdr = false;
2239cc0c5d25SMichal Krawczyk 
22408a7a73f2SMichal Krawczyk 	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
22418a7a73f2SMichal Krawczyk 	if (rc != 0) {
22428a7a73f2SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
22438a7a73f2SMichal Krawczyk 		goto err;
22448a7a73f2SMichal Krawczyk 	}
224592401abfSShai Brandes 	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
224692401abfSShai Brandes 	if (rc != 0) {
224792401abfSShai Brandes 		PMD_INIT_LOG(CRIT, "Failed to allocate customer metrics buffer\n");
224892401abfSShai Brandes 		goto err;
224992401abfSShai Brandes 	}
22508a7a73f2SMichal Krawczyk 
22511173fca2SJan Medala 	/* device specific initialization routine */
2252b9b05d6fSMichal Krawczyk 	rc = ena_device_init(adapter, pci_dev, &get_feat_ctx);
22531173fca2SJan Medala 	if (rc) {
2254617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
225592401abfSShai Brandes 		goto err_metrics_delete;
22561173fca2SJan Medala 	}
2257b9b05d6fSMichal Krawczyk 
2258b9b05d6fSMichal Krawczyk 	/* Check if device supports LSC */
2259b9b05d6fSMichal Krawczyk 	if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE)))
2260b9b05d6fSMichal Krawczyk 		adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
22611173fca2SJan Medala 
22628a7a73f2SMichal Krawczyk 	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
22638a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
22642fca2a98SMichal Krawczyk 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
22652fca2a98SMichal Krawczyk 					     &get_feat_ctx.llq, &llq_config);
22662fca2a98SMichal Krawczyk 	if (unlikely(rc)) {
2267617898d1SMichal Krawczyk 		PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
22682fca2a98SMichal Krawczyk 		return rc;
22692fca2a98SMichal Krawczyk 	}
22702fca2a98SMichal Krawczyk 
22712fca2a98SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
22722fca2a98SMichal Krawczyk 		queue_type_str = "Regular";
22732fca2a98SMichal Krawczyk 	else
22742fca2a98SMichal Krawczyk 		queue_type_str = "Low latency";
22756f1c9df9SStephen Hemminger 	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
2276ea93d37eSRafal Kozik 
2277ea93d37eSRafal Kozik 	calc_queue_ctx.ena_dev = ena_dev;
2278ea93d37eSRafal Kozik 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
22791173fca2SJan Medala 
22805920d930SMichal Krawczyk 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
22818a7a73f2SMichal Krawczyk 	rc = ena_calc_io_queue_size(&calc_queue_ctx,
22828a7a73f2SMichal Krawczyk 		adapter->use_large_llq_hdr);
22835920d930SMichal Krawczyk 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
2284241da076SRafal Kozik 		rc = -EFAULT;
2285241da076SRafal Kozik 		goto err_device_destroy;
2286241da076SRafal Kozik 	}
22871173fca2SJan Medala 
22885920d930SMichal Krawczyk 	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
22895920d930SMichal Krawczyk 	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
2290ea93d37eSRafal Kozik 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
2291ea93d37eSRafal Kozik 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
22925920d930SMichal Krawczyk 	adapter->max_num_io_queues = max_num_io_queues;
22932061fe41SRafal Kozik 
229433dde075SMichal Krawczyk 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
229533dde075SMichal Krawczyk 		disable_meta_caching =
229633dde075SMichal Krawczyk 			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
229733dde075SMichal Krawczyk 			   BIT(ENA_ADMIN_DISABLE_META_CACHING));
229833dde075SMichal Krawczyk 	} else {
229933dde075SMichal Krawczyk 		disable_meta_caching = false;
230033dde075SMichal Krawczyk 	}
230133dde075SMichal Krawczyk 
23021173fca2SJan Medala 	/* prepare ring structures */
230333dde075SMichal Krawczyk 	ena_init_rings(adapter, disable_meta_caching);
23041173fca2SJan Medala 
2305372c1af5SJan Medala 	ena_config_debug_area(adapter);
2306372c1af5SJan Medala 
23071173fca2SJan Medala 	/* Set max MTU for this device */
23081173fca2SJan Medala 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
23091173fca2SJan Medala 
2310e8c838fdSMichal Krawczyk 	ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload);
231183277a7cSJakub Palider 
23121173fca2SJan Medala 	/* Copy MAC address and point DPDK to it */
23136d13ea8eSOlivier Matz 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
2314538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)
2315538da7a1SOlivier Matz 		get_feat_ctx.dev_attr.mac_addr,
23166d13ea8eSOlivier Matz 		(struct rte_ether_addr *)adapter->mac_addr);
23171173fca2SJan Medala 
231834d5e97eSMichal Krawczyk 	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
231934d5e97eSMichal Krawczyk 	if (unlikely(rc != 0)) {
232034d5e97eSMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
232134d5e97eSMichal Krawczyk 		goto err_delete_debug_area;
232234d5e97eSMichal Krawczyk 	}
232334d5e97eSMichal Krawczyk 
23241173fca2SJan Medala 	adapter->drv_stats = rte_zmalloc("adapter stats",
23251173fca2SJan Medala 					 sizeof(*adapter->drv_stats),
23261173fca2SJan Medala 					 RTE_CACHE_LINE_SIZE);
23271173fca2SJan Medala 	if (!adapter->drv_stats) {
2328617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR,
2329617898d1SMichal Krawczyk 			"Failed to allocate memory for adapter statistics\n");
2330241da076SRafal Kozik 		rc = -ENOMEM;
233134d5e97eSMichal Krawczyk 		goto err_rss_destroy;
23321173fca2SJan Medala 	}
23331173fca2SJan Medala 
23341343c415SMichal Krawczyk 	rte_spinlock_init(&adapter->admin_lock);
23351343c415SMichal Krawczyk 
2336eb0ef49dSMichal Krawczyk 	rte_intr_callback_register(intr_handle,
2337eb0ef49dSMichal Krawczyk 				   ena_interrupt_handler_rte,
2338aab58857SStanislaw Kardach 				   eth_dev);
2339eb0ef49dSMichal Krawczyk 	rte_intr_enable(intr_handle);
2340eb0ef49dSMichal Krawczyk 	ena_com_set_admin_polling_mode(ena_dev, false);
2341ca148440SMichal Krawczyk 	ena_com_admin_aenq_enable(ena_dev);
2342eb0ef49dSMichal Krawczyk 
2343d9b8b106SMichal Krawczyk 	rte_timer_init(&adapter->timer_wd);
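	/*
	 * The watchdog timer is only initialized at this point; it is armed
	 * later, once the port is started. The timeouts it relies on (e.g.
	 * keep_alive_timeout) may be refreshed from device hints in
	 * ena_update_hints() below, which converts msecs to timer ticks.
	 */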
2344d9b8b106SMichal Krawczyk 
23451173fca2SJan Medala 	adapters_found++;
23461173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_INIT;
23471173fca2SJan Medala 
23481173fca2SJan Medala 	return 0;
2349241da076SRafal Kozik 
235034d5e97eSMichal Krawczyk err_rss_destroy:
235134d5e97eSMichal Krawczyk 	ena_com_rss_destroy(ena_dev);
2352241da076SRafal Kozik err_delete_debug_area:
2353241da076SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
2354241da076SRafal Kozik 
2355241da076SRafal Kozik err_device_destroy:
2356241da076SRafal Kozik 	ena_com_delete_host_info(ena_dev);
2357241da076SRafal Kozik 	ena_com_admin_destroy(ena_dev);
235892401abfSShai Brandes err_metrics_delete:
235992401abfSShai Brandes 	ena_com_delete_customer_metrics_buffer(ena_dev);
2360241da076SRafal Kozik err:
2361241da076SRafal Kozik 	return rc;
23621173fca2SJan Medala }
23631173fca2SJan Medala 
2364e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev)
2365eb0ef49dSMichal Krawczyk {
2366890728ffSStephen Hemminger 	struct ena_adapter *adapter = eth_dev->data->dev_private;
2367e457bc70SRafal Kozik 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
2368eb0ef49dSMichal Krawczyk 
2369e457bc70SRafal Kozik 	if (adapter->state == ENA_ADAPTER_STATE_FREE)
2370e457bc70SRafal Kozik 		return;
2371e457bc70SRafal Kozik 
2372e457bc70SRafal Kozik 	ena_com_set_admin_running_state(ena_dev, false);
2373eb0ef49dSMichal Krawczyk 
2374eb0ef49dSMichal Krawczyk 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
2375eb0ef49dSMichal Krawczyk 		ena_close(eth_dev);
2376eb0ef49dSMichal Krawczyk 
237734d5e97eSMichal Krawczyk 	ena_com_rss_destroy(ena_dev);
237834d5e97eSMichal Krawczyk 
2379e457bc70SRafal Kozik 	ena_com_delete_debug_area(ena_dev);
2380e457bc70SRafal Kozik 	ena_com_delete_host_info(ena_dev);
2381e457bc70SRafal Kozik 
2382e457bc70SRafal Kozik 	ena_com_abort_admin_commands(ena_dev);
2383e457bc70SRafal Kozik 	ena_com_wait_for_abort_completion(ena_dev);
2384e457bc70SRafal Kozik 	ena_com_admin_destroy(ena_dev);
2385e457bc70SRafal Kozik 	ena_com_mmio_reg_read_request_destroy(ena_dev);
238692401abfSShai Brandes 	ena_com_delete_customer_metrics_buffer(ena_dev);
2387e457bc70SRafal Kozik 
2388e457bc70SRafal Kozik 	adapter->state = ENA_ADAPTER_STATE_FREE;
2389e457bc70SRafal Kozik }
2390e457bc70SRafal Kozik 
2391e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
2392e457bc70SRafal Kozik {
2393e457bc70SRafal Kozik 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2394e457bc70SRafal Kozik 		return 0;
2395e457bc70SRafal Kozik 
2396e457bc70SRafal Kozik 	ena_destroy_device(eth_dev);
2397e457bc70SRafal Kozik 
2398eb0ef49dSMichal Krawczyk 	return 0;
2399eb0ef49dSMichal Krawczyk }
2400eb0ef49dSMichal Krawczyk 
24011173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
24021173fca2SJan Medala {
2403890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
2404b9b05d6fSMichal Krawczyk 	int rc;
24057369f88fSRafal Kozik 
24061173fca2SJan Medala 	adapter->state = ENA_ADAPTER_STATE_CONFIG;
24071173fca2SJan Medala 
2408295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
2409295968d1SFerruh Yigit 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2410295968d1SFerruh Yigit 	dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2411b418f0d2SMichal Krawczyk 
2412e2a6d08bSMichal Krawczyk 	/* Scattered Rx cannot be turned off in the HW, so this capability must
2413e2a6d08bSMichal Krawczyk 	 * be forced.
2414e2a6d08bSMichal Krawczyk 	 */
2415e2a6d08bSMichal Krawczyk 	dev->data->scattered_rx = 1;
2416e2a6d08bSMichal Krawczyk 
2417f93e20e5SMichal Krawczyk 	adapter->last_tx_comp_qid = 0;
2418f93e20e5SMichal Krawczyk 
2419f93e20e5SMichal Krawczyk 	adapter->missing_tx_completion_budget =
2420f93e20e5SMichal Krawczyk 		RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
2421f93e20e5SMichal Krawczyk 
2422f93e20e5SMichal Krawczyk 	/* To avoid detecting a spurious Tx completion timeout when the
2423f93e20e5SMichal Krawczyk 	 * application does not call the Tx cleanup function, set the timeout
2424f93e20e5SMichal Krawczyk 	 * for the Tx queue to half of the missing completion timeout as a
2425f93e20e5SMichal Krawczyk 	 * safety margin. If many Tx completions are missing in the
2426f93e20e5SMichal Krawczyk 	 * queue, they will be detected sooner or later.
2427f93e20e5SMichal Krawczyk 	 */
2428f93e20e5SMichal Krawczyk 	adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
2429f93e20e5SMichal Krawczyk 
2430b9b05d6fSMichal Krawczyk 	rc = ena_configure_aenq(adapter);
2431b9b05d6fSMichal Krawczyk 
2432b9b05d6fSMichal Krawczyk 	return rc;
24331173fca2SJan Medala }
24341173fca2SJan Medala 
243533dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter,
243633dde075SMichal Krawczyk 			   bool disable_meta_caching)
24371173fca2SJan Medala {
24385920d930SMichal Krawczyk 	size_t i;
24391173fca2SJan Medala 
24405920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
24411173fca2SJan Medala 		struct ena_ring *ring = &adapter->tx_ring[i];
24421173fca2SJan Medala 
24431173fca2SJan Medala 		ring->configured = 0;
24441173fca2SJan Medala 		ring->type = ENA_RING_TYPE_TX;
24451173fca2SJan Medala 		ring->adapter = adapter;
24461173fca2SJan Medala 		ring->id = i;
24471173fca2SJan Medala 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
24481173fca2SJan Medala 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
24492061fe41SRafal Kozik 		ring->sgl_size = adapter->max_tx_sgl_size;
245033dde075SMichal Krawczyk 		ring->disable_meta_caching = disable_meta_caching;
24511173fca2SJan Medala 	}
24521173fca2SJan Medala 
24535920d930SMichal Krawczyk 	for (i = 0; i < adapter->max_num_io_queues; i++) {
24541173fca2SJan Medala 		struct ena_ring *ring = &adapter->rx_ring[i];
24551173fca2SJan Medala 
24561173fca2SJan Medala 		ring->configured = 0;
24571173fca2SJan Medala 		ring->type = ENA_RING_TYPE_RX;
24581173fca2SJan Medala 		ring->adapter = adapter;
24591173fca2SJan Medala 		ring->id = i;
2460ea93d37eSRafal Kozik 		ring->sgl_size = adapter->max_rx_sgl_size;
24611173fca2SJan Medala 	}
24621173fca2SJan Medala }
24631173fca2SJan Medala 
24643a822d79SMichal Krawczyk static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
24653a822d79SMichal Krawczyk {
24663a822d79SMichal Krawczyk 	uint64_t port_offloads = 0;
24673a822d79SMichal Krawczyk 
24683a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
2469295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
24703a822d79SMichal Krawczyk 
24713a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads &
24723a822d79SMichal Krawczyk 	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
24733a822d79SMichal Krawczyk 		port_offloads |=
2474295968d1SFerruh Yigit 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
24753a822d79SMichal Krawczyk 
24763a822d79SMichal Krawczyk 	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
2477295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
24783a822d79SMichal Krawczyk 
2479295968d1SFerruh Yigit 	port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
2480e2a6d08bSMichal Krawczyk 
24813a822d79SMichal Krawczyk 	return port_offloads;
24823a822d79SMichal Krawczyk }
24833a822d79SMichal Krawczyk 
24843a822d79SMichal Krawczyk static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
24853a822d79SMichal Krawczyk {
24863a822d79SMichal Krawczyk 	uint64_t port_offloads = 0;
24873a822d79SMichal Krawczyk 
24883a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
2489295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
24903a822d79SMichal Krawczyk 
24913a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
2492295968d1SFerruh Yigit 		port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
24933a822d79SMichal Krawczyk 	if (adapter->offloads.tx_offloads &
24943a822d79SMichal Krawczyk 	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
24953a822d79SMichal Krawczyk 	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
24963a822d79SMichal Krawczyk 		port_offloads |=
2497295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
24983a822d79SMichal Krawczyk 
2499295968d1SFerruh Yigit 	port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
25003a822d79SMichal Krawczyk 
2501c339f538SDawid Gorecki 	port_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2502c339f538SDawid Gorecki 
25033a822d79SMichal Krawczyk 	return port_offloads;
25043a822d79SMichal Krawczyk }
25053a822d79SMichal Krawczyk 
25063a822d79SMichal Krawczyk static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
25073a822d79SMichal Krawczyk {
25083a822d79SMichal Krawczyk 	RTE_SET_USED(adapter);
25093a822d79SMichal Krawczyk 
25103a822d79SMichal Krawczyk 	return 0;
25113a822d79SMichal Krawczyk }
25123a822d79SMichal Krawczyk 
25133a822d79SMichal Krawczyk static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
25143a822d79SMichal Krawczyk {
2515c339f538SDawid Gorecki 	uint64_t queue_offloads = 0;
25163a822d79SMichal Krawczyk 	RTE_SET_USED(adapter);
25173a822d79SMichal Krawczyk 
2518c339f538SDawid Gorecki 	queue_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
2519c339f538SDawid Gorecki 
2520c339f538SDawid Gorecki 	return queue_offloads;
25213a822d79SMichal Krawczyk }
25223a822d79SMichal Krawczyk 
2523bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev,
25241173fca2SJan Medala 			 struct rte_eth_dev_info *dev_info)
25251173fca2SJan Medala {
25261173fca2SJan Medala 	struct ena_adapter *adapter;
25271173fca2SJan Medala 	struct ena_com_dev *ena_dev;
25281173fca2SJan Medala 
2529498c687aSRafal Kozik 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
2530498c687aSRafal Kozik 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
2531890728ffSStephen Hemminger 	adapter = dev->data->dev_private;
25321173fca2SJan Medala 
25331173fca2SJan Medala 	ena_dev = &adapter->ena_dev;
2534498c687aSRafal Kozik 	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
25351173fca2SJan Medala 
2536e274f573SMarc Sune 	dev_info->speed_capa =
2537295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_1G |
2538295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_2_5G |
2539295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_5G |
2540295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_10G |
2541295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_25G |
2542295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_40G |
2543295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_50G |
25444ebe5efaSShai Brandes 			RTE_ETH_LINK_SPEED_100G |
25454ebe5efaSShai Brandes 			RTE_ETH_LINK_SPEED_200G |
25464ebe5efaSShai Brandes 			RTE_ETH_LINK_SPEED_400G;
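	/*
	 * A minimal sketch of how an application would consume the
	 * capabilities reported by this callback (names like port_conf are
	 * hypothetical, assuming a configured port id of 0):
	 *
	 *	struct rte_eth_dev_info info;
	 *
	 *	rte_eth_dev_info_get(0, &info);
	 *	if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH)
	 *		port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	 *
	 * Note that speed_capa advertises the supported link speeds, not the
	 * speed negotiated at runtime.
	 */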
2547e274f573SMarc Sune 
25481173fca2SJan Medala 	/* Inform framework about available features */
25493a822d79SMichal Krawczyk 	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
25503a822d79SMichal Krawczyk 	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
25513a822d79SMichal Krawczyk 	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
25523a822d79SMichal Krawczyk 	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
25531173fca2SJan Medala 
255434d5e97eSMichal Krawczyk 	dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
255534d5e97eSMichal Krawczyk 	dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
2556b01ead20SRafal Kozik 
25571173fca2SJan Medala 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
25581bb4a528SFerruh Yigit 	dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN +
25591bb4a528SFerruh Yigit 		RTE_ETHER_CRC_LEN;
25601bb4a528SFerruh Yigit 	dev_info->min_mtu = ENA_MIN_MTU;
25611bb4a528SFerruh Yigit 	dev_info->max_mtu = adapter->max_mtu;
25621173fca2SJan Medala 	dev_info->max_mac_addrs = 1;
25631173fca2SJan Medala 
25645920d930SMichal Krawczyk 	dev_info->max_rx_queues = adapter->max_num_io_queues;
25655920d930SMichal Krawczyk 	dev_info->max_tx_queues = adapter->max_num_io_queues;
25661173fca2SJan Medala 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
256756b8b9b7SRafal Kozik 
25685920d930SMichal Krawczyk 	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
256992680dc2SRafal Kozik 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2570ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2571ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
2572ea93d37eSRafal Kozik 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2573ea93d37eSRafal Kozik 					adapter->max_rx_sgl_size);
257492680dc2SRafal Kozik 
25755920d930SMichal Krawczyk 	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
257692680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
257792680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2578ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
257992680dc2SRafal Kozik 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
2580ea93d37eSRafal Kozik 					adapter->max_tx_sgl_size);
2581bdad90d1SIvan Ilchenko 
258230a6c7efSStanislaw Kardach 	dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE;
258330a6c7efSStanislaw Kardach 	dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE;
258430a6c7efSStanislaw Kardach 
25850d5c38baSChengwen Feng 	dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE;
25860d5c38baSChengwen Feng 
2587bdad90d1SIvan Ilchenko 	return 0;
25881173fca2SJan Medala }
25891173fca2SJan Medala 
25901be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
25911be097dcSMichal Krawczyk {
25921be097dcSMichal Krawczyk 	mbuf->data_len = len;
25931be097dcSMichal Krawczyk 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
25941be097dcSMichal Krawczyk 	mbuf->refcnt = 1;
25951be097dcSMichal Krawczyk 	mbuf->next = NULL;
25961be097dcSMichal Krawczyk }
25971be097dcSMichal Krawczyk 
25981be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
25991be097dcSMichal Krawczyk 				    struct ena_com_rx_buf_info *ena_bufs,
26001be097dcSMichal Krawczyk 				    uint32_t descs,
26011be097dcSMichal Krawczyk 				    uint16_t *next_to_clean,
26021be097dcSMichal Krawczyk 				    uint8_t offset)
26031be097dcSMichal Krawczyk {
26041be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
26051be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf_head;
26061be097dcSMichal Krawczyk 	struct ena_rx_buffer *rx_info;
260783fd97b2SMichal Krawczyk 	int rc;
26081be097dcSMichal Krawczyk 	uint16_t ntc, len, req_id, buf = 0;
26091be097dcSMichal Krawczyk 
26101be097dcSMichal Krawczyk 	if (unlikely(descs == 0))
26111be097dcSMichal Krawczyk 		return NULL;
26121be097dcSMichal Krawczyk 
26131be097dcSMichal Krawczyk 	ntc = *next_to_clean;
26141be097dcSMichal Krawczyk 
26151be097dcSMichal Krawczyk 	len = ena_bufs[buf].len;
26161be097dcSMichal Krawczyk 	req_id = ena_bufs[buf].req_id;
26171be097dcSMichal Krawczyk 
26181be097dcSMichal Krawczyk 	rx_info = &rx_ring->rx_buffer_info[req_id];
26191be097dcSMichal Krawczyk 
26201be097dcSMichal Krawczyk 	mbuf = rx_info->mbuf;
26211be097dcSMichal Krawczyk 	RTE_ASSERT(mbuf != NULL);
26221be097dcSMichal Krawczyk 
26231be097dcSMichal Krawczyk 	ena_init_rx_mbuf(mbuf, len);
26241be097dcSMichal Krawczyk 
26251be097dcSMichal Krawczyk 	/* Fill the mbuf head with the data specific for 1st segment. */
26261be097dcSMichal Krawczyk 	mbuf_head = mbuf;
26271be097dcSMichal Krawczyk 	mbuf_head->nb_segs = descs;
26281be097dcSMichal Krawczyk 	mbuf_head->port = rx_ring->port_id;
26291be097dcSMichal Krawczyk 	mbuf_head->pkt_len = len;
26301be097dcSMichal Krawczyk 	mbuf_head->data_off += offset;
26311be097dcSMichal Krawczyk 
26321be097dcSMichal Krawczyk 	rx_info->mbuf = NULL;
2633c0006061SMichal Krawczyk 	rx_ring->empty_rx_reqs[ntc] = req_id;
2634c0006061SMichal Krawczyk 	ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
26351be097dcSMichal Krawczyk 
26361be097dcSMichal Krawczyk 	while (--descs) {
26371be097dcSMichal Krawczyk 		++buf;
26381be097dcSMichal Krawczyk 		len = ena_bufs[buf].len;
26391be097dcSMichal Krawczyk 		req_id = ena_bufs[buf].req_id;
26401be097dcSMichal Krawczyk 
26411be097dcSMichal Krawczyk 		rx_info = &rx_ring->rx_buffer_info[req_id];
26421be097dcSMichal Krawczyk 		RTE_ASSERT(rx_info->mbuf != NULL);
26431be097dcSMichal Krawczyk 
264483fd97b2SMichal Krawczyk 		if (unlikely(len == 0)) {
264583fd97b2SMichal Krawczyk 			/*
264683fd97b2SMichal Krawczyk 			 * Some devices can pass a descriptor with length 0.
264783fd97b2SMichal Krawczyk 			 * To avoid confusion, the PMD simply puts the
264883fd97b2SMichal Krawczyk 			 * descriptor back, as it was never used. This also
264983fd97b2SMichal Krawczyk 			 * avoids an mbuf allocation.
265083fd97b2SMichal Krawczyk 			 */
265183fd97b2SMichal Krawczyk 			rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
265283fd97b2SMichal Krawczyk 				rx_info->mbuf, req_id);
265383fd97b2SMichal Krawczyk 			if (unlikely(rc != 0)) {
265483fd97b2SMichal Krawczyk 				/* Free the mbuf in case of an error. */
265583fd97b2SMichal Krawczyk 				rte_mbuf_raw_free(rx_info->mbuf);
265683fd97b2SMichal Krawczyk 			} else {
265783fd97b2SMichal Krawczyk 				/*
265883fd97b2SMichal Krawczyk 				 * If there was no error, just exit the loop as
265983fd97b2SMichal Krawczyk 				 * 0 length descriptor is always the last one.
266083fd97b2SMichal Krawczyk 				 */
266183fd97b2SMichal Krawczyk 				break;
266283fd97b2SMichal Krawczyk 			}
266383fd97b2SMichal Krawczyk 		} else {
26641be097dcSMichal Krawczyk 			/* Create an mbuf chain. */
26651be097dcSMichal Krawczyk 			mbuf->next = rx_info->mbuf;
26661be097dcSMichal Krawczyk 			mbuf = mbuf->next;
26671be097dcSMichal Krawczyk 
26681be097dcSMichal Krawczyk 			ena_init_rx_mbuf(mbuf, len);
26691be097dcSMichal Krawczyk 			mbuf_head->pkt_len += len;
267083fd97b2SMichal Krawczyk 		}
26711be097dcSMichal Krawczyk 
267283fd97b2SMichal Krawczyk 		/*
267383fd97b2SMichal Krawczyk 		 * Mark the descriptor as depleted and perform necessary
267483fd97b2SMichal Krawczyk 		 * cleanup.
267583fd97b2SMichal Krawczyk 		 * This code will execute in two cases:
267683fd97b2SMichal Krawczyk 		 *  1. Descriptor len was greater than 0 - normal situation.
267783fd97b2SMichal Krawczyk 		 *  2. Descriptor len was 0 and we failed to add the descriptor
267883fd97b2SMichal Krawczyk 		 *     to the device. In that situation, we should try to add
267983fd97b2SMichal Krawczyk 		 *     the mbuf again in the populate routine and mark the
268083fd97b2SMichal Krawczyk 		 *     descriptor as used up by the device.
268183fd97b2SMichal Krawczyk 		 */
26821be097dcSMichal Krawczyk 		rx_info->mbuf = NULL;
2683c0006061SMichal Krawczyk 		rx_ring->empty_rx_reqs[ntc] = req_id;
2684c0006061SMichal Krawczyk 		ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
26851be097dcSMichal Krawczyk 	}
26861be097dcSMichal Krawczyk 
26871be097dcSMichal Krawczyk 	*next_to_clean = ntc;
26881be097dcSMichal Krawczyk 
26891be097dcSMichal Krawczyk 	return mbuf_head;
26901be097dcSMichal Krawczyk }
26911be097dcSMichal Krawczyk 
26921173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
26931173fca2SJan Medala 				  uint16_t nb_pkts)
26941173fca2SJan Medala {
26951173fca2SJan Medala 	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
269677550607SMichal Krawczyk 	unsigned int free_queue_entries;
26971173fca2SJan Medala 	uint16_t next_to_clean = rx_ring->next_to_clean;
269874456796SMichal Krawczyk 	uint16_t descs_in_use;
26991be097dcSMichal Krawczyk 	struct rte_mbuf *mbuf;
27001be097dcSMichal Krawczyk 	uint16_t completed;
27011173fca2SJan Medala 	struct ena_com_rx_ctx ena_rx_ctx;
27021be097dcSMichal Krawczyk 	int i, rc = 0;
270334d5e97eSMichal Krawczyk 	bool fill_hash;
27041173fca2SJan Medala 
27050a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX
27061173fca2SJan Medala 	/* Check adapter state */
27071173fca2SJan Medala 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
27080a001d69SMichal Krawczyk 		PMD_RX_LOG(ALERT,
27091173fca2SJan Medala 			"Trying to receive pkts while device is NOT running\n");
27101173fca2SJan Medala 		return 0;
27111173fca2SJan Medala 	}
27120a001d69SMichal Krawczyk #endif
27131173fca2SJan Medala 
2714295968d1SFerruh Yigit 	fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
271534d5e97eSMichal Krawczyk 
2716c0006061SMichal Krawczyk 	descs_in_use = rx_ring->ring_size -
271774456796SMichal Krawczyk 		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
271874456796SMichal Krawczyk 	nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
27191173fca2SJan Medala 
27201173fca2SJan Medala 	for (completed = 0; completed < nb_pkts; completed++) {
2721ea93d37eSRafal Kozik 		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
27221173fca2SJan Medala 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
27231173fca2SJan Medala 		ena_rx_ctx.descs = 0;
27247b3a3c4bSMaciej Bielski 		ena_rx_ctx.pkt_offset = 0;
27251173fca2SJan Medala 		/* receive packet context */
27261173fca2SJan Medala 		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
27271173fca2SJan Medala 				    rx_ring->ena_com_io_sq,
27281173fca2SJan Medala 				    &ena_rx_ctx);
27291173fca2SJan Medala 		if (unlikely(rc)) {
27300a001d69SMichal Krawczyk 			PMD_RX_LOG(ERR,
2731617898d1SMichal Krawczyk 				"Failed to get the packet from the device, rc: %d\n",
2732617898d1SMichal Krawczyk 				rc);
273305cffdcfSMichal Krawczyk 			if (rc == ENA_COM_NO_SPACE) {
273405cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_desc_num;
27352bae75eaSDawid Gorecki 				ena_trigger_reset(rx_ring->adapter,
27362bae75eaSDawid Gorecki 					ENA_REGS_RESET_TOO_MANY_RX_DESCS);
273705cffdcfSMichal Krawczyk 			} else {
273805cffdcfSMichal Krawczyk 				++rx_ring->rx_stats.bad_req_id;
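				/*
				 * An invalid req_id means the driver's and the
				 * device's view of the ring have diverged, so
				 * request a reset (ENA_REGS_RESET_INV_RX_REQ_ID)
				 * to recover.
				 */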
27392bae75eaSDawid Gorecki 				ena_trigger_reset(rx_ring->adapter,
27402bae75eaSDawid Gorecki 					ENA_REGS_RESET_INV_RX_REQ_ID);
274105cffdcfSMichal Krawczyk 			}
27421173fca2SJan Medala 			return 0;
27431173fca2SJan Medala 		}
27441173fca2SJan Medala 
27451be097dcSMichal Krawczyk 		mbuf = ena_rx_mbuf(rx_ring,
27461be097dcSMichal Krawczyk 			ena_rx_ctx.ena_bufs,
27471be097dcSMichal Krawczyk 			ena_rx_ctx.descs,
27481be097dcSMichal Krawczyk 			&next_to_clean,
27491be097dcSMichal Krawczyk 			ena_rx_ctx.pkt_offset);
27501be097dcSMichal Krawczyk 		if (unlikely(mbuf == NULL)) {
27511be097dcSMichal Krawczyk 			for (i = 0; i < ena_rx_ctx.descs; ++i) {
2752c0006061SMichal Krawczyk 				rx_ring->empty_rx_reqs[next_to_clean] =
27531be097dcSMichal Krawczyk 					rx_ring->ena_bufs[i].req_id;
2754c0006061SMichal Krawczyk 				next_to_clean = ENA_IDX_NEXT_MASKED(
2755c0006061SMichal Krawczyk 					next_to_clean, rx_ring->size_mask);
27561173fca2SJan Medala 			}
2757f00930d9SRafal Kozik 			break;
27581be097dcSMichal Krawczyk 		}
27591173fca2SJan Medala 
27601173fca2SJan Medala 		/* fill mbuf attributes if any */
276184daba99SMichal Krawczyk 		ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash);
27627830e905SSolganik Alexander 
27631be097dcSMichal Krawczyk 		if (unlikely(mbuf->ol_flags &
276484daba99SMichal Krawczyk 		    (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD)))
2765ef74b5f7SMichal Krawczyk 			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
27667830e905SSolganik Alexander 
27671be097dcSMichal Krawczyk 		rx_pkts[completed] = mbuf;
27681be097dcSMichal Krawczyk 		rx_ring->rx_stats.bytes += mbuf->pkt_len;
27691173fca2SJan Medala 	}
27701173fca2SJan Medala 
27711be097dcSMichal Krawczyk 	rx_ring->rx_stats.cnt += completed;
2772ec78af6bSMichal Krawczyk 	rx_ring->next_to_clean = next_to_clean;
2773ec78af6bSMichal Krawczyk 
277477550607SMichal Krawczyk 	free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
277577550607SMichal Krawczyk 
27761173fca2SJan Medala 	/* Burst refill to save doorbells, memory barriers, const interval */
2777005064e5SMichal Krawczyk 	if (free_queue_entries >= rx_ring->rx_free_thresh) {
277877550607SMichal Krawczyk 		ena_populate_rx_queue(rx_ring, free_queue_entries);
2779a45462c5SRafal Kozik 	}
27801173fca2SJan Medala 
27811be097dcSMichal Krawczyk 	return completed;
27821173fca2SJan Medala }
27831173fca2SJan Medala 
2784b3fc5a1aSKonstantin Ananyev static uint16_t
278583277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2786b3fc5a1aSKonstantin Ananyev 	uint16_t nb_pkts)
2787b3fc5a1aSKonstantin Ananyev {
2788b3fc5a1aSKonstantin Ananyev 	int32_t ret;
2789b3fc5a1aSKonstantin Ananyev 	uint32_t i;
2790b3fc5a1aSKonstantin Ananyev 	struct rte_mbuf *m;
279183277a7cSJakub Palider 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2792e8c838fdSMichal Krawczyk 	struct ena_adapter *adapter = tx_ring->adapter;
2793a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ip_hdr;
2794b3fc5a1aSKonstantin Ananyev 	uint64_t ol_flags;
2795e8c838fdSMichal Krawczyk 	uint64_t l4_csum_flag;
2796e8c838fdSMichal Krawczyk 	uint64_t dev_offload_capa;
279783277a7cSJakub Palider 	uint16_t frag_field;
2798e8c838fdSMichal Krawczyk 	bool need_pseudo_csum;
279983277a7cSJakub Palider 
2800e8c838fdSMichal Krawczyk 	dev_offload_capa = adapter->offloads.tx_offloads;
2801b3fc5a1aSKonstantin Ananyev 	for (i = 0; i != nb_pkts; i++) {
2802b3fc5a1aSKonstantin Ananyev 		m = tx_pkts[i];
2803b3fc5a1aSKonstantin Ananyev 		ol_flags = m->ol_flags;
2804b3fc5a1aSKonstantin Ananyev 
2805e8c838fdSMichal Krawczyk 		/* Check if any offload flag was set */
2806e8c838fdSMichal Krawczyk 		if (ol_flags == 0)
2807bc5ef57dSMichal Krawczyk 			continue;
2808bc5ef57dSMichal Krawczyk 
2809daa02b5cSOlivier Matz 		l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
2810e8c838fdSMichal Krawczyk 		/* SCTP checksum offload is not supported by the ENA. */
2811e8c838fdSMichal Krawczyk 		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
2812daa02b5cSOlivier Matz 		    l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
2813e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2814e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "] has unsupported offload flags set: 0x%" PRIx64 "\n",
2815e8c838fdSMichal Krawczyk 				i, ol_flags);
2816baeed5f4SMichal Krawczyk 			rte_errno = ENOTSUP;
2817b3fc5a1aSKonstantin Ananyev 			return i;
2818b3fc5a1aSKonstantin Ananyev 		}
2819b3fc5a1aSKonstantin Ananyev 
282096ffa8a7SMichal Krawczyk 		if (unlikely(m->nb_segs >= tx_ring->sgl_size &&
282196ffa8a7SMichal Krawczyk 		    !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
282296ffa8a7SMichal Krawczyk 		      m->nb_segs == tx_ring->sgl_size &&
282396ffa8a7SMichal Krawczyk 		      m->data_len < tx_ring->tx_max_header_size))) {
282496ffa8a7SMichal Krawczyk 			PMD_TX_LOG(DEBUG,
282596ffa8a7SMichal Krawczyk 				"mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n",
282696ffa8a7SMichal Krawczyk 				i, m->nb_segs);
282796ffa8a7SMichal Krawczyk 			rte_errno = EINVAL;
282896ffa8a7SMichal Krawczyk 			return i;
282996ffa8a7SMichal Krawczyk 		}
283096ffa8a7SMichal Krawczyk 
2831b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2832e8c838fdSMichal Krawczyk 		/* Check if requested offload is also enabled for the queue */
2833daa02b5cSOlivier Matz 		if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2834295968d1SFerruh Yigit 		    !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
2835daa02b5cSOlivier Matz 		    (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
2836295968d1SFerruh Yigit 		    !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
2837daa02b5cSOlivier Matz 		    (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
2838295968d1SFerruh Yigit 		    !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
2839e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2840e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "]: requested offloads 0x%" PRIx64 " are not enabled for the queue[%u]\n",
2841e8c838fdSMichal Krawczyk 				i, ol_flags, tx_ring->id);
2842e8c838fdSMichal Krawczyk 			rte_errno = EINVAL;
2843e8c838fdSMichal Krawczyk 			return i;
2844e8c838fdSMichal Krawczyk 		}
2845e8c838fdSMichal Krawczyk 
2846e8c838fdSMichal Krawczyk 		/* The caller is obligated to set l2 and l3 len if any cksum
2847e8c838fdSMichal Krawczyk 		 * offload is enabled.
2848e8c838fdSMichal Krawczyk 		 */
2849daa02b5cSOlivier Matz 		if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
2850e8c838fdSMichal Krawczyk 		    (m->l2_len == 0 || m->l3_len == 0))) {
2851e8c838fdSMichal Krawczyk 			PMD_TX_LOG(DEBUG,
2852e8c838fdSMichal Krawczyk 				"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
2853e8c838fdSMichal Krawczyk 				i);
2854e8c838fdSMichal Krawczyk 			rte_errno = EINVAL;
2855e8c838fdSMichal Krawczyk 			return i;
2856e8c838fdSMichal Krawczyk 		}
2857b3fc5a1aSKonstantin Ananyev 		ret = rte_validate_tx_offload(m);
2858b3fc5a1aSKonstantin Ananyev 		if (ret != 0) {
2859baeed5f4SMichal Krawczyk 			rte_errno = -ret;
2860b3fc5a1aSKonstantin Ananyev 			return i;
2861b3fc5a1aSKonstantin Ananyev 		}
2862b3fc5a1aSKonstantin Ananyev #endif
286383277a7cSJakub Palider 
2864e8c838fdSMichal Krawczyk 		/* Verify HW support for requested offloads and determine if
2865e8c838fdSMichal Krawczyk 		 * pseudo header checksum is needed.
286683277a7cSJakub Palider 		 */
2867e8c838fdSMichal Krawczyk 		need_pseudo_csum = false;
2868daa02b5cSOlivier Matz 		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2869daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
2870e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
2871e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2872e8c838fdSMichal Krawczyk 				return i;
2873e8c838fdSMichal Krawczyk 			}
287483277a7cSJakub Palider 
2875daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
2876e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_IPV4_TSO)) {
2877e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2878e8c838fdSMichal Krawczyk 				return i;
2879e8c838fdSMichal Krawczyk 			}
2880e8c838fdSMichal Krawczyk 
2881e8c838fdSMichal Krawczyk 			/* Check HW capabilities and if pseudo csum is needed
2882e8c838fdSMichal Krawczyk 			 * for L4 offloads.
2883e8c838fdSMichal Krawczyk 			 */
2884daa02b5cSOlivier Matz 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2885e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
2886e8c838fdSMichal Krawczyk 				if (dev_offload_capa &
2887e8c838fdSMichal Krawczyk 				    ENA_L4_IPV4_CSUM_PARTIAL) {
2888e8c838fdSMichal Krawczyk 					need_pseudo_csum = true;
2889e8c838fdSMichal Krawczyk 				} else {
2890e8c838fdSMichal Krawczyk 					rte_errno = ENOTSUP;
2891e8c838fdSMichal Krawczyk 					return i;
2892e8c838fdSMichal Krawczyk 				}
2893e8c838fdSMichal Krawczyk 			}
2894e8c838fdSMichal Krawczyk 
2895e8c838fdSMichal Krawczyk 			/* Parse the DF flag */
2896e8c838fdSMichal Krawczyk 			ip_hdr = rte_pktmbuf_mtod_offset(m,
2897e8c838fdSMichal Krawczyk 				struct rte_ipv4_hdr *, m->l2_len);
2898e8c838fdSMichal Krawczyk 			frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2899e8c838fdSMichal Krawczyk 			if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
2900e8c838fdSMichal Krawczyk 				m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2901daa02b5cSOlivier Matz 			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2902e8c838fdSMichal Krawczyk 				/* In case we are supposed to TSO and have DF
2903e8c838fdSMichal Krawczyk 				 * not set (DF=0) hardware must be provided with
2904e8c838fdSMichal Krawczyk 				 * partial checksum.
2905e8c838fdSMichal Krawczyk 				 */
2906e8c838fdSMichal Krawczyk 				need_pseudo_csum = true;
2907e8c838fdSMichal Krawczyk 			}
2908daa02b5cSOlivier Matz 		} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2909e8c838fdSMichal Krawczyk 			/* There is no support for IPv6 TSO for now. */
2910daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2911e8c838fdSMichal Krawczyk 				rte_errno = ENOTSUP;
2912e8c838fdSMichal Krawczyk 				return i;
2913e8c838fdSMichal Krawczyk 			}
2914e8c838fdSMichal Krawczyk 
2915e8c838fdSMichal Krawczyk 			/* Check HW capabilities and if pseudo csum is needed */
2916daa02b5cSOlivier Matz 			if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
2917e8c838fdSMichal Krawczyk 			    !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
2918e8c838fdSMichal Krawczyk 				if (dev_offload_capa &
2919e8c838fdSMichal Krawczyk 				    ENA_L4_IPV6_CSUM_PARTIAL) {
2920e8c838fdSMichal Krawczyk 					need_pseudo_csum = true;
2921e8c838fdSMichal Krawczyk 				} else {
2922e8c838fdSMichal Krawczyk 					rte_errno = ENOTSUP;
2923e8c838fdSMichal Krawczyk 					return i;
2924e8c838fdSMichal Krawczyk 				}
2925e8c838fdSMichal Krawczyk 			}
2926e8c838fdSMichal Krawczyk 		}
2927e8c838fdSMichal Krawczyk 
2928e8c838fdSMichal Krawczyk 		if (need_pseudo_csum) {
2929e8c838fdSMichal Krawczyk 			ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
2930b3fc5a1aSKonstantin Ananyev 			if (ret != 0) {
2931baeed5f4SMichal Krawczyk 				rte_errno = -ret;
2932b3fc5a1aSKonstantin Ananyev 				return i;
2933b3fc5a1aSKonstantin Ananyev 			}
2934b3fc5a1aSKonstantin Ananyev 		}
2935e8c838fdSMichal Krawczyk 	}
2936b3fc5a1aSKonstantin Ananyev 
2937b3fc5a1aSKonstantin Ananyev 	return i;
2938b3fc5a1aSKonstantin Ananyev }
2939b3fc5a1aSKonstantin Ananyev 
2940f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter,
2941f01f060cSRafal Kozik 			     struct ena_admin_ena_hw_hints *hints)
2942f01f060cSRafal Kozik {
2943f01f060cSRafal Kozik 	if (hints->admin_completion_tx_timeout)
2944f01f060cSRafal Kozik 		adapter->ena_dev.admin_queue.completion_timeout =
2945f01f060cSRafal Kozik 			hints->admin_completion_tx_timeout * 1000;
2946f01f060cSRafal Kozik 
2947f01f060cSRafal Kozik 	if (hints->mmio_read_timeout)
2948f01f060cSRafal Kozik 		/* convert to usec */
2949f01f060cSRafal Kozik 		adapter->ena_dev.mmio_read.reg_read_to =
2950f01f060cSRafal Kozik 			hints->mmio_read_timeout * 1000;
2951d9b8b106SMichal Krawczyk 
2952d9b8b106SMichal Krawczyk 	if (hints->driver_watchdog_timeout) {
2953d9b8b106SMichal Krawczyk 		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2954d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2955d9b8b106SMichal Krawczyk 		else
2956d9b8b106SMichal Krawczyk 			/* Convert msecs to ticks */
2957d9b8b106SMichal Krawczyk 			adapter->keep_alive_timeout =
2958d9b8b106SMichal Krawczyk 				(hints->driver_watchdog_timeout *
2959d9b8b106SMichal Krawczyk 				rte_get_timer_hz()) / 1000;
2960d9b8b106SMichal Krawczyk 	}
2961f01f060cSRafal Kozik }
2962f01f060cSRafal Kozik 
296336278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
296436278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info,
296536278b82SMichal Krawczyk 	struct rte_mbuf *mbuf,
296636278b82SMichal Krawczyk 	void **push_header,
296736278b82SMichal Krawczyk 	uint16_t *header_len)
296836278b82SMichal Krawczyk {
296936278b82SMichal Krawczyk 	struct ena_com_buf *ena_buf;
297036278b82SMichal Krawczyk 	uint16_t delta, seg_len, push_len;
297136278b82SMichal Krawczyk 
297236278b82SMichal Krawczyk 	delta = 0;
297336278b82SMichal Krawczyk 	seg_len = mbuf->data_len;
297436278b82SMichal Krawczyk 
297536278b82SMichal Krawczyk 	tx_info->mbuf = mbuf;
297636278b82SMichal Krawczyk 	ena_buf = tx_info->bufs;
297736278b82SMichal Krawczyk 
297836278b82SMichal Krawczyk 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
297936278b82SMichal Krawczyk 		/*
298036278b82SMichal Krawczyk 		 * Tx header might be (and in most cases will be) smaller than
298136278b82SMichal Krawczyk 		 * tx_max_header_size. It is not a problem to send more data
298236278b82SMichal Krawczyk 		 * to the device than actually needed when the mbuf size is
298336278b82SMichal Krawczyk 		 * greater than tx_max_header_size.
298436278b82SMichal Krawczyk 		 */
298536278b82SMichal Krawczyk 		push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
298636278b82SMichal Krawczyk 		*header_len = push_len;
298736278b82SMichal Krawczyk 
298836278b82SMichal Krawczyk 		if (likely(push_len <= seg_len)) {
298936278b82SMichal Krawczyk 			/* If the push header is in the single segment, then
299036278b82SMichal Krawczyk 			 * just point it to the 1st mbuf data.
299136278b82SMichal Krawczyk 			 */
299236278b82SMichal Krawczyk 			*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
299336278b82SMichal Krawczyk 		} else {
299436278b82SMichal Krawczyk 			/* If the push header spans several segments, copy
299536278b82SMichal Krawczyk 			 * it to the intermediate buffer.
299636278b82SMichal Krawczyk 			 */
299736278b82SMichal Krawczyk 			rte_pktmbuf_read(mbuf, 0, push_len,
299836278b82SMichal Krawczyk 				tx_ring->push_buf_intermediate_buf);
299936278b82SMichal Krawczyk 			*push_header = tx_ring->push_buf_intermediate_buf;
300036278b82SMichal Krawczyk 			delta = push_len - seg_len;
300136278b82SMichal Krawczyk 		}
300236278b82SMichal Krawczyk 	} else {
300336278b82SMichal Krawczyk 		*push_header = NULL;
300436278b82SMichal Krawczyk 		*header_len = 0;
300536278b82SMichal Krawczyk 		push_len = 0;
300636278b82SMichal Krawczyk 	}
300736278b82SMichal Krawczyk 
300836278b82SMichal Krawczyk 	/* Process first segment taking into consideration pushed header */
300936278b82SMichal Krawczyk 	if (seg_len > push_len) {
301036278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova +
301136278b82SMichal Krawczyk 				 mbuf->data_off +
301236278b82SMichal Krawczyk 				 push_len;
301336278b82SMichal Krawczyk 		ena_buf->len = seg_len - push_len;
301436278b82SMichal Krawczyk 		ena_buf++;
301536278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
301636278b82SMichal Krawczyk 	}
301736278b82SMichal Krawczyk 
301836278b82SMichal Krawczyk 	while ((mbuf = mbuf->next) != NULL) {
301936278b82SMichal Krawczyk 		seg_len = mbuf->data_len;
302036278b82SMichal Krawczyk 
302136278b82SMichal Krawczyk 		/* Skip mbufs if whole data is pushed as a header */
302236278b82SMichal Krawczyk 		if (unlikely(delta > seg_len)) {
302336278b82SMichal Krawczyk 			delta -= seg_len;
302436278b82SMichal Krawczyk 			continue;
302536278b82SMichal Krawczyk 		}
302636278b82SMichal Krawczyk 
302736278b82SMichal Krawczyk 		ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
302836278b82SMichal Krawczyk 		ena_buf->len = seg_len - delta;
302936278b82SMichal Krawczyk 		ena_buf++;
303036278b82SMichal Krawczyk 		tx_info->num_of_bufs++;
303136278b82SMichal Krawczyk 
303236278b82SMichal Krawczyk 		delta = 0;
303336278b82SMichal Krawczyk 	}
303436278b82SMichal Krawczyk }
303536278b82SMichal Krawczyk 
303636278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
303736278b82SMichal Krawczyk {
303836278b82SMichal Krawczyk 	struct ena_tx_buffer *tx_info;
303936278b82SMichal Krawczyk 	struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
304036278b82SMichal Krawczyk 	uint16_t next_to_use;
304136278b82SMichal Krawczyk 	uint16_t header_len;
304236278b82SMichal Krawczyk 	uint16_t req_id;
304336278b82SMichal Krawczyk 	void *push_header;
304436278b82SMichal Krawczyk 	int nb_hw_desc;
304536278b82SMichal Krawczyk 	int rc;
304636278b82SMichal Krawczyk 
304796ffa8a7SMichal Krawczyk 	/* Check for space for 2 additional descriptors due to the
304896ffa8a7SMichal Krawczyk 	 * possible header split and the metadata descriptor.
304996ffa8a7SMichal Krawczyk 	 */
305096ffa8a7SMichal Krawczyk 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
305196ffa8a7SMichal Krawczyk 					  mbuf->nb_segs + 2)) {
305296ffa8a7SMichal Krawczyk 		PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n");
305396ffa8a7SMichal Krawczyk 		return ENA_COM_NO_MEM;
305496ffa8a7SMichal Krawczyk 	}
305536278b82SMichal Krawczyk 
305636278b82SMichal Krawczyk 	next_to_use = tx_ring->next_to_use;
305736278b82SMichal Krawczyk 
305836278b82SMichal Krawczyk 	req_id = tx_ring->empty_tx_reqs[next_to_use];
305936278b82SMichal Krawczyk 	tx_info = &tx_ring->tx_buffer_info[req_id];
306036278b82SMichal Krawczyk 	tx_info->num_of_bufs = 0;
30613d47e9b1SMichal Krawczyk 	RTE_ASSERT(tx_info->mbuf == NULL);
306236278b82SMichal Krawczyk 
306336278b82SMichal Krawczyk 	ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
306436278b82SMichal Krawczyk 
306536278b82SMichal Krawczyk 	ena_tx_ctx.ena_bufs = tx_info->bufs;
306636278b82SMichal Krawczyk 	ena_tx_ctx.push_header = push_header;
306736278b82SMichal Krawczyk 	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
306836278b82SMichal Krawczyk 	ena_tx_ctx.req_id = req_id;
306936278b82SMichal Krawczyk 	ena_tx_ctx.header_len = header_len;
307036278b82SMichal Krawczyk 
307136278b82SMichal Krawczyk 	/* Set Tx offloads flags, if applicable */
307236278b82SMichal Krawczyk 	ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
307336278b82SMichal Krawczyk 		tx_ring->disable_meta_caching);
307436278b82SMichal Krawczyk 
307536278b82SMichal Krawczyk 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
307636278b82SMichal Krawczyk 						&ena_tx_ctx))) {
30770a001d69SMichal Krawczyk 		PMD_TX_LOG(DEBUG,
3078617898d1SMichal Krawczyk 			"LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
307936278b82SMichal Krawczyk 			tx_ring->id);
308036278b82SMichal Krawczyk 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
30811f949ad9SAmit Bernstein 		tx_ring->tx_stats.doorbells++;
30821d973d8fSIgor Chauskin 		tx_ring->pkts_without_db = false;
308336278b82SMichal Krawczyk 	}
308436278b82SMichal Krawczyk 
308536278b82SMichal Krawczyk 	/* prepare the packet's descriptors to dma engine */
308636278b82SMichal Krawczyk 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
308736278b82SMichal Krawczyk 				&nb_hw_desc);
308836278b82SMichal Krawczyk 	if (unlikely(rc)) {
3089b57e1053SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
309036278b82SMichal Krawczyk 		++tx_ring->tx_stats.prepare_ctx_err;
30912bae75eaSDawid Gorecki 		ena_trigger_reset(tx_ring->adapter,
30922bae75eaSDawid Gorecki 			ENA_REGS_RESET_DRIVER_INVALID_STATE);
309336278b82SMichal Krawczyk 		return rc;
309436278b82SMichal Krawczyk 	}
309536278b82SMichal Krawczyk 
309636278b82SMichal Krawczyk 	tx_info->tx_descs = nb_hw_desc;
3097f93e20e5SMichal Krawczyk 	tx_info->timestamp = rte_get_timer_cycles();
309836278b82SMichal Krawczyk 
309936278b82SMichal Krawczyk 	tx_ring->tx_stats.cnt++;
310036278b82SMichal Krawczyk 	tx_ring->tx_stats.bytes += mbuf->pkt_len;
310136278b82SMichal Krawczyk 
310236278b82SMichal Krawczyk 	tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
310336278b82SMichal Krawczyk 		tx_ring->size_mask);
310436278b82SMichal Krawczyk 
310536278b82SMichal Krawczyk 	return 0;
310636278b82SMichal Krawczyk }
310736278b82SMichal Krawczyk 
3108c339f538SDawid Gorecki static __rte_always_inline size_t
3109c339f538SDawid Gorecki ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean,
3110c339f538SDawid Gorecki 	struct rte_mbuf *mbuf,
3111c339f538SDawid Gorecki 	size_t mbuf_cnt,
3112c339f538SDawid Gorecki 	size_t buf_size)
3113c339f538SDawid Gorecki {
3114c339f538SDawid Gorecki 	struct rte_mbuf *m_next;
3115c339f538SDawid Gorecki 
3116c339f538SDawid Gorecki 	while (mbuf != NULL) {
3117c339f538SDawid Gorecki 		m_next = mbuf->next;
3118c339f538SDawid Gorecki 		mbufs_to_clean[mbuf_cnt++] = mbuf;
3119c339f538SDawid Gorecki 		if (mbuf_cnt == buf_size) {
3120c339f538SDawid Gorecki 			rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean,
3121c339f538SDawid Gorecki 				(unsigned int)mbuf_cnt);
3122c339f538SDawid Gorecki 			mbuf_cnt = 0;
3123c339f538SDawid Gorecki 		}
3124c339f538SDawid Gorecki 		mbuf = m_next;
3125c339f538SDawid Gorecki 	}
3126c339f538SDawid Gorecki 
3127c339f538SDawid Gorecki 	return mbuf_cnt;
3128c339f538SDawid Gorecki }
3129c339f538SDawid Gorecki 
3130a52b317eSDawid Gorecki static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt)
313136278b82SMichal Krawczyk {
3132c339f538SDawid Gorecki 	struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE];
3133a52b317eSDawid Gorecki 	struct ena_ring *tx_ring = (struct ena_ring *)txp;
3134c339f538SDawid Gorecki 	size_t mbuf_cnt = 0;
313536278b82SMichal Krawczyk 	unsigned int total_tx_descs = 0;
3136a52b317eSDawid Gorecki 	unsigned int total_tx_pkts = 0;
3137005064e5SMichal Krawczyk 	uint16_t cleanup_budget;
313836278b82SMichal Krawczyk 	uint16_t next_to_clean = tx_ring->next_to_clean;
3139c339f538SDawid Gorecki 	bool fast_free = tx_ring->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
314036278b82SMichal Krawczyk 
3141a52b317eSDawid Gorecki 	/*
3142a52b317eSDawid Gorecki 	 * If free_pkt_cnt is equal to 0, it means that the user requested
3143a52b317eSDawid Gorecki 	 * full cleanup, so attempt to release all Tx descriptors
3144a52b317eSDawid Gorecki 	 * (ring_size - 1 -> size_mask)
3145a52b317eSDawid Gorecki 	 */
3146a52b317eSDawid Gorecki 	cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt;
314736278b82SMichal Krawczyk 
3148a52b317eSDawid Gorecki 	while (likely(total_tx_pkts < cleanup_budget)) {
314936278b82SMichal Krawczyk 		struct rte_mbuf *mbuf;
315036278b82SMichal Krawczyk 		struct ena_tx_buffer *tx_info;
315136278b82SMichal Krawczyk 		uint16_t req_id;
315236278b82SMichal Krawczyk 
315336278b82SMichal Krawczyk 		if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
315436278b82SMichal Krawczyk 			break;
315536278b82SMichal Krawczyk 
315636278b82SMichal Krawczyk 		if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
315736278b82SMichal Krawczyk 			break;
315836278b82SMichal Krawczyk 
315936278b82SMichal Krawczyk 		/* Get Tx info & store how many descs were processed */
316036278b82SMichal Krawczyk 		tx_info = &tx_ring->tx_buffer_info[req_id];
3161f93e20e5SMichal Krawczyk 		tx_info->timestamp = 0;
316236278b82SMichal Krawczyk 
316336278b82SMichal Krawczyk 		mbuf = tx_info->mbuf;
3164c339f538SDawid Gorecki 		if (fast_free) {
3165c339f538SDawid Gorecki 			mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt,
3166c339f538SDawid Gorecki 				ENA_CLEANUP_BUF_SIZE);
3167c339f538SDawid Gorecki 		} else {
316836278b82SMichal Krawczyk 			rte_pktmbuf_free(mbuf);
3169c339f538SDawid Gorecki 		}
317036278b82SMichal Krawczyk 
317136278b82SMichal Krawczyk 		tx_info->mbuf = NULL;
317236278b82SMichal Krawczyk 		tx_ring->empty_tx_reqs[next_to_clean] = req_id;
317336278b82SMichal Krawczyk 
317436278b82SMichal Krawczyk 		total_tx_descs += tx_info->tx_descs;
3175a52b317eSDawid Gorecki 		total_tx_pkts++;
317636278b82SMichal Krawczyk 
317736278b82SMichal Krawczyk 		/* Put back descriptor to the ring for reuse */
317836278b82SMichal Krawczyk 		next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
317936278b82SMichal Krawczyk 			tx_ring->size_mask);
318036278b82SMichal Krawczyk 	}
318136278b82SMichal Krawczyk 
318236278b82SMichal Krawczyk 	if (likely(total_tx_descs > 0)) {
318336278b82SMichal Krawczyk 		/* acknowledge completion of sent packets */
318436278b82SMichal Krawczyk 		tx_ring->next_to_clean = next_to_clean;
318536278b82SMichal Krawczyk 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
318636278b82SMichal Krawczyk 	}
3187f93e20e5SMichal Krawczyk 
3188c339f538SDawid Gorecki 	if (mbuf_cnt != 0)
3189c339f538SDawid Gorecki 		rte_mempool_put_bulk(mbufs_to_clean[0]->pool,
3190c339f538SDawid Gorecki 			(void **)mbufs_to_clean, mbuf_cnt);
3191c339f538SDawid Gorecki 
3192a52b317eSDawid Gorecki 	/* Notify completion handler that full cleanup was performed */
3193a52b317eSDawid Gorecki 	if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget)
3194f93e20e5SMichal Krawczyk 		tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
3195a52b317eSDawid Gorecki 
3196a52b317eSDawid Gorecki 	return total_tx_pkts;
319736278b82SMichal Krawczyk }
319836278b82SMichal Krawczyk 
31991173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
32001173fca2SJan Medala 				  uint16_t nb_pkts)
32011173fca2SJan Medala {
32021173fca2SJan Medala 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
3203005064e5SMichal Krawczyk 	int available_desc;
320474456796SMichal Krawczyk 	uint16_t sent_idx = 0;
32051173fca2SJan Medala 
32060a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_TX
32071173fca2SJan Medala 	/* Check adapter state */
32081173fca2SJan Medala 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
32090a001d69SMichal Krawczyk 		PMD_TX_LOG(ALERT,
32101173fca2SJan Medala 			"Trying to xmit pkts while device is NOT running\n");
32111173fca2SJan Medala 		return 0;
32121173fca2SJan Medala 	}
32130a001d69SMichal Krawczyk #endif 32141173fca2SJan Medala 321567216c31SMichal Krawczyk available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); 321667216c31SMichal Krawczyk if (available_desc < tx_ring->tx_free_thresh) 3217a52b317eSDawid Gorecki ena_tx_cleanup((void *)tx_ring, 0); 321867216c31SMichal Krawczyk 32191173fca2SJan Medala for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 322036278b82SMichal Krawczyk if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 32212061fe41SRafal Kozik break; 32221d973d8fSIgor Chauskin tx_ring->pkts_without_db = true; 322336278b82SMichal Krawczyk rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 322436278b82SMichal Krawczyk tx_ring->size_mask)]); 32252fca2a98SMichal Krawczyk } 32262fca2a98SMichal Krawczyk 32275e02e19eSJan Medala /* If there are ready packets to be xmitted... */ 32281d973d8fSIgor Chauskin if (likely(tx_ring->pkts_without_db)) { 32295e02e19eSJan Medala /* ...let HW do its best :-) */ 32301173fca2SJan Medala ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 323145b6d861SMichal Krawczyk tx_ring->tx_stats.doorbells++; 32321d973d8fSIgor Chauskin tx_ring->pkts_without_db = false; 32335e02e19eSJan Medala } 32345e02e19eSJan Medala 32357830e905SSolganik Alexander tx_ring->tx_stats.available_desc = 3236b2b02edeSMichal Krawczyk ena_com_free_q_entries(tx_ring->ena_com_io_sq); 32377830e905SSolganik Alexander tx_ring->tx_stats.tx_poll++; 32387830e905SSolganik Alexander 32391173fca2SJan Medala return sent_idx; 32401173fca2SJan Medala } 32411173fca2SJan Medala 324292401abfSShai Brandes static void ena_copy_customer_metrics(struct ena_adapter *adapter, uint64_t *buf, 324392401abfSShai Brandes size_t num_metrics) 324445718adaSMichal Krawczyk { 324592401abfSShai Brandes struct ena_com_dev *ena_dev = &adapter->ena_dev; 324645718adaSMichal Krawczyk int rc; 324745718adaSMichal Krawczyk 324892401abfSShai Brandes if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) { 324992401abfSShai Brandes if (num_metrics != ENA_STATS_ARRAY_METRICS) { 325092401abfSShai Brandes PMD_DRV_LOG(ERR, "Detected discrepancy in the number of customer metrics"); 325192401abfSShai Brandes return; 325292401abfSShai Brandes } 325345718adaSMichal Krawczyk rte_spinlock_lock(&adapter->admin_lock); 325492401abfSShai Brandes rc = ENA_PROXY(adapter, 325592401abfSShai Brandes ena_com_get_customer_metrics, 325692401abfSShai Brandes &adapter->ena_dev, 325792401abfSShai Brandes (char *)buf, 325892401abfSShai Brandes num_metrics * sizeof(uint64_t)); 325945718adaSMichal Krawczyk rte_spinlock_unlock(&adapter->admin_lock); 326045718adaSMichal Krawczyk if (rc != 0) { 326192401abfSShai Brandes PMD_DRV_LOG(WARNING, "Failed to get customer metrics, rc: %d\n", rc); 326292401abfSShai Brandes return; 326345718adaSMichal Krawczyk } 326445718adaSMichal Krawczyk 326592401abfSShai Brandes } else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { 326692401abfSShai Brandes if (num_metrics != ENA_STATS_ARRAY_METRICS_LEGACY) { 326792401abfSShai Brandes PMD_DRV_LOG(ERR, "Detected discrepancy in the number of legacy metrics"); 326892401abfSShai Brandes return; 326992401abfSShai Brandes } 327092401abfSShai Brandes 327192401abfSShai Brandes rte_spinlock_lock(&adapter->admin_lock); 327292401abfSShai Brandes rc = ENA_PROXY(adapter, 327392401abfSShai Brandes ena_com_get_eni_stats, 327492401abfSShai Brandes &adapter->ena_dev, 327592401abfSShai Brandes (struct ena_admin_eni_stats *)buf); 327692401abfSShai Brandes rte_spinlock_unlock(&adapter->admin_lock); 327792401abfSShai Brandes if (rc != 0) { 
327892401abfSShai Brandes PMD_DRV_LOG(WARNING, 327992401abfSShai Brandes "Failed to get ENI metrics, rc: %d\n", rc); 328092401abfSShai Brandes return; 328192401abfSShai Brandes } 328292401abfSShai Brandes } 328345718adaSMichal Krawczyk } 328445718adaSMichal Krawczyk 3285a73dd098SShai Brandes static void ena_copy_ena_srd_info(struct ena_adapter *adapter, 3286a73dd098SShai Brandes struct ena_stats_srd *srd_info) 3287a73dd098SShai Brandes { 3288a73dd098SShai Brandes int rc; 3289a73dd098SShai Brandes 3290a73dd098SShai Brandes if (!ena_com_get_cap(&adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO)) 3291a73dd098SShai Brandes return; 3292a73dd098SShai Brandes 3293a73dd098SShai Brandes rte_spinlock_lock(&adapter->admin_lock); 3294a73dd098SShai Brandes rc = ENA_PROXY(adapter, 3295a73dd098SShai Brandes ena_com_get_ena_srd_info, 3296a73dd098SShai Brandes &adapter->ena_dev, 3297a73dd098SShai Brandes (struct ena_admin_ena_srd_info *)srd_info); 3298a73dd098SShai Brandes rte_spinlock_unlock(&adapter->admin_lock); 3299a73dd098SShai Brandes if (rc != ENA_COM_OK && rc != ENA_COM_UNSUPPORTED) { 3300a73dd098SShai Brandes PMD_DRV_LOG(WARNING, 3301a73dd098SShai Brandes "Failed to get ENA express srd info, rc: %d\n", rc); 3302a73dd098SShai Brandes return; 3303a73dd098SShai Brandes } 3304a73dd098SShai Brandes } 3305a73dd098SShai Brandes 33067830e905SSolganik Alexander /** 33077830e905SSolganik Alexander * DPDK callback to retrieve names of extended device statistics 33087830e905SSolganik Alexander * 33097830e905SSolganik Alexander * @param dev 33107830e905SSolganik Alexander * Pointer to Ethernet device structure. 33117830e905SSolganik Alexander * @param[out] xstats_names 33127830e905SSolganik Alexander * Buffer to insert names into. 33137830e905SSolganik Alexander * @param n 33147830e905SSolganik Alexander * Number of names. 33157830e905SSolganik Alexander * 33167830e905SSolganik Alexander * @return 33177830e905SSolganik Alexander * Number of xstats names. 
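 *
 * A typical caller invokes this callback twice (through
 * rte_eth_xstats_get_names()): first with @p xstats_names set to NULL to
 * learn the required array size, then with a buffer of that size. See the
 * usage sketch at the end of this file.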
33187830e905SSolganik Alexander */ 33197830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev, 33207830e905SSolganik Alexander struct rte_eth_xstat_name *xstats_names, 33217830e905SSolganik Alexander unsigned int n) 33227830e905SSolganik Alexander { 332392401abfSShai Brandes struct ena_adapter *adapter = dev->data->dev_private; 3324aab58857SStanislaw Kardach unsigned int xstats_count = ena_xstats_calc_num(dev->data); 33257830e905SSolganik Alexander unsigned int stat, i, count = 0; 33267830e905SSolganik Alexander 33277830e905SSolganik Alexander if (n < xstats_count || !xstats_names) 33287830e905SSolganik Alexander return xstats_count; 33297830e905SSolganik Alexander 33307830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 33317830e905SSolganik Alexander strcpy(xstats_names[count].name, 33327830e905SSolganik Alexander ena_stats_global_strings[stat].name); 33337830e905SSolganik Alexander 333492401abfSShai Brandes for (stat = 0; stat < adapter->metrics_num; stat++, count++) 333592401abfSShai Brandes rte_strscpy(xstats_names[count].name, 333692401abfSShai Brandes ena_stats_metrics_strings[stat].name, 333792401abfSShai Brandes RTE_ETH_XSTATS_NAME_SIZE); 3338a73dd098SShai Brandes for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++) 3339a73dd098SShai Brandes rte_strscpy(xstats_names[count].name, 3340a73dd098SShai Brandes ena_stats_srd_strings[stat].name, 3341a73dd098SShai Brandes RTE_ETH_XSTATS_NAME_SIZE); 334245718adaSMichal Krawczyk 33437830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 33447830e905SSolganik Alexander for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 33457830e905SSolganik Alexander snprintf(xstats_names[count].name, 33467830e905SSolganik Alexander sizeof(xstats_names[count].name), 33477830e905SSolganik Alexander "rx_q%d_%s", i, 33487830e905SSolganik Alexander ena_stats_rx_strings[stat].name); 33497830e905SSolganik Alexander 33507830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 33517830e905SSolganik Alexander for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 33527830e905SSolganik Alexander snprintf(xstats_names[count].name, 33537830e905SSolganik Alexander sizeof(xstats_names[count].name), 33547830e905SSolganik Alexander "tx_q%d_%s", i, 33557830e905SSolganik Alexander ena_stats_tx_strings[stat].name); 33567830e905SSolganik Alexander 33577830e905SSolganik Alexander return xstats_count; 33587830e905SSolganik Alexander } 33597830e905SSolganik Alexander 33607830e905SSolganik Alexander /** 33613cec73faSMichal Krawczyk * DPDK callback to retrieve names of extended device statistics for the given 33623cec73faSMichal Krawczyk * ids. 33633cec73faSMichal Krawczyk * 33643cec73faSMichal Krawczyk * @param dev 33653cec73faSMichal Krawczyk * Pointer to Ethernet device structure. 33663cec73faSMichal Krawczyk * @param[out] xstats_names 33673cec73faSMichal Krawczyk * Buffer to insert names into. 33683cec73faSMichal Krawczyk * @param ids 33693cec73faSMichal Krawczyk * IDs array for which the names should be retrieved. 33703cec73faSMichal Krawczyk * @param size 33713cec73faSMichal Krawczyk * Number of ids. 33723cec73faSMichal Krawczyk * 33733cec73faSMichal Krawczyk * @return 33743cec73faSMichal Krawczyk * Positive value: number of xstats names. Negative value: error code. 
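 *
 * Each id is a position within the full xstats list as laid out by
 * ena_xstats_get_names(): global stats first, then the customer metrics and
 * ENA SRD entries, followed by the per-queue Rx and Tx entries.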
33753cec73faSMichal Krawczyk */ 33763cec73faSMichal Krawczyk static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev, 33773cec73faSMichal Krawczyk const uint64_t *ids, 33783cec73faSMichal Krawczyk struct rte_eth_xstat_name *xstats_names, 33793cec73faSMichal Krawczyk unsigned int size) 33803cec73faSMichal Krawczyk { 338192401abfSShai Brandes struct ena_adapter *adapter = dev->data->dev_private; 33823cec73faSMichal Krawczyk uint64_t xstats_count = ena_xstats_calc_num(dev->data); 33833cec73faSMichal Krawczyk uint64_t id, qid; 33843cec73faSMichal Krawczyk unsigned int i; 33853cec73faSMichal Krawczyk 33863cec73faSMichal Krawczyk if (xstats_names == NULL) 33873cec73faSMichal Krawczyk return xstats_count; 33883cec73faSMichal Krawczyk 33893cec73faSMichal Krawczyk for (i = 0; i < size; ++i) { 33903cec73faSMichal Krawczyk id = ids[i]; 33913cec73faSMichal Krawczyk if (id > xstats_count) { 33923cec73faSMichal Krawczyk PMD_DRV_LOG(ERR, 33933cec73faSMichal Krawczyk "ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n", 33943cec73faSMichal Krawczyk id, xstats_count); 33953cec73faSMichal Krawczyk return -EINVAL; 33963cec73faSMichal Krawczyk } 33973cec73faSMichal Krawczyk 33983cec73faSMichal Krawczyk if (id < ENA_STATS_ARRAY_GLOBAL) { 33993cec73faSMichal Krawczyk strcpy(xstats_names[i].name, 34003cec73faSMichal Krawczyk ena_stats_global_strings[id].name); 34013cec73faSMichal Krawczyk continue; 34023cec73faSMichal Krawczyk } 34033cec73faSMichal Krawczyk 34043cec73faSMichal Krawczyk id -= ENA_STATS_ARRAY_GLOBAL; 340592401abfSShai Brandes if (id < adapter->metrics_num) { 340692401abfSShai Brandes rte_strscpy(xstats_names[i].name, 340792401abfSShai Brandes ena_stats_metrics_strings[id].name, 340892401abfSShai Brandes RTE_ETH_XSTATS_NAME_SIZE); 34093cec73faSMichal Krawczyk continue; 34103cec73faSMichal Krawczyk } 34113cec73faSMichal Krawczyk 341292401abfSShai Brandes id -= adapter->metrics_num; 3413a73dd098SShai Brandes 3414a73dd098SShai Brandes if (id < ENA_STATS_ARRAY_ENA_SRD) { 3415a73dd098SShai Brandes rte_strscpy(xstats_names[i].name, 3416a73dd098SShai Brandes ena_stats_srd_strings[id].name, 3417a73dd098SShai Brandes RTE_ETH_XSTATS_NAME_SIZE); 3418a73dd098SShai Brandes continue; 3419a73dd098SShai Brandes } 3420a73dd098SShai Brandes id -= ENA_STATS_ARRAY_ENA_SRD; 3421a73dd098SShai Brandes 34223cec73faSMichal Krawczyk if (id < ENA_STATS_ARRAY_RX) { 34233cec73faSMichal Krawczyk qid = id / dev->data->nb_rx_queues; 34243cec73faSMichal Krawczyk id %= dev->data->nb_rx_queues; 34253cec73faSMichal Krawczyk snprintf(xstats_names[i].name, 34263cec73faSMichal Krawczyk sizeof(xstats_names[i].name), 34273cec73faSMichal Krawczyk "rx_q%" PRIu64 "d_%s", 34283cec73faSMichal Krawczyk qid, ena_stats_rx_strings[id].name); 34293cec73faSMichal Krawczyk continue; 34303cec73faSMichal Krawczyk } 34313cec73faSMichal Krawczyk 34323cec73faSMichal Krawczyk id -= ENA_STATS_ARRAY_RX; 34333cec73faSMichal Krawczyk /* Although this condition is not needed, it was added for 34343cec73faSMichal Krawczyk * compatibility if new xstat structure would be ever added. 
34353cec73faSMichal Krawczyk */ 34363cec73faSMichal Krawczyk if (id < ENA_STATS_ARRAY_TX) { 34373cec73faSMichal Krawczyk qid = id / dev->data->nb_tx_queues; 34383cec73faSMichal Krawczyk id %= dev->data->nb_tx_queues; 34393cec73faSMichal Krawczyk snprintf(xstats_names[i].name, 34403cec73faSMichal Krawczyk sizeof(xstats_names[i].name), 34413cec73faSMichal Krawczyk "tx_q%" PRIu64 "_%s", 34423cec73faSMichal Krawczyk qid, ena_stats_tx_strings[id].name); 34433cec73faSMichal Krawczyk continue; 34443cec73faSMichal Krawczyk } 34453cec73faSMichal Krawczyk } 34463cec73faSMichal Krawczyk 34473cec73faSMichal Krawczyk return i; 34483cec73faSMichal Krawczyk } 34493cec73faSMichal Krawczyk 34503cec73faSMichal Krawczyk /** 34517830e905SSolganik Alexander * DPDK callback to get extended device statistics. 34527830e905SSolganik Alexander * 34537830e905SSolganik Alexander * @param dev 34547830e905SSolganik Alexander * Pointer to Ethernet device structure. 34557830e905SSolganik Alexander * @param[out] stats 34567830e905SSolganik Alexander * Stats table output buffer. 34577830e905SSolganik Alexander * @param n 34587830e905SSolganik Alexander * The size of the stats table. 34597830e905SSolganik Alexander * 34607830e905SSolganik Alexander * @return 34617830e905SSolganik Alexander * Number of xstats on success, negative on failure. 34627830e905SSolganik Alexander */ 34637830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev, 34647830e905SSolganik Alexander struct rte_eth_xstat *xstats, 34657830e905SSolganik Alexander unsigned int n) 34667830e905SSolganik Alexander { 3467890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 3468aab58857SStanislaw Kardach unsigned int xstats_count = ena_xstats_calc_num(dev->data); 34697830e905SSolganik Alexander unsigned int stat, i, count = 0; 34707830e905SSolganik Alexander int stat_offset; 34717830e905SSolganik Alexander void *stats_begin; 347292401abfSShai Brandes uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS]; 3473a73dd098SShai Brandes struct ena_stats_srd srd_info = {0}; 34747830e905SSolganik Alexander 34757830e905SSolganik Alexander if (n < xstats_count) 34767830e905SSolganik Alexander return xstats_count; 34777830e905SSolganik Alexander 34787830e905SSolganik Alexander if (!xstats) 34797830e905SSolganik Alexander return 0; 34807830e905SSolganik Alexander 34817830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 3482493107fdSMichal Krawczyk stat_offset = ena_stats_global_strings[stat].stat_offset; 34837830e905SSolganik Alexander stats_begin = &adapter->dev_stats; 34847830e905SSolganik Alexander 34857830e905SSolganik Alexander xstats[count].id = count; 34867830e905SSolganik Alexander xstats[count].value = *((uint64_t *) 34877830e905SSolganik Alexander ((char *)stats_begin + stat_offset)); 34887830e905SSolganik Alexander } 34897830e905SSolganik Alexander 349092401abfSShai Brandes ena_copy_customer_metrics(adapter, metrics_stats, adapter->metrics_num); 349192401abfSShai Brandes stats_begin = metrics_stats; 349292401abfSShai Brandes for (stat = 0; stat < adapter->metrics_num; stat++, count++) { 349392401abfSShai Brandes stat_offset = ena_stats_metrics_strings[stat].stat_offset; 349445718adaSMichal Krawczyk 349545718adaSMichal Krawczyk xstats[count].id = count; 349645718adaSMichal Krawczyk xstats[count].value = *((uint64_t *) 349745718adaSMichal Krawczyk ((char *)stats_begin + stat_offset)); 349845718adaSMichal Krawczyk } 349945718adaSMichal Krawczyk 3500a73dd098SShai Brandes 
ena_copy_ena_srd_info(adapter, &srd_info);
3501a73dd098SShai Brandes 	stats_begin = &srd_info;
3502a73dd098SShai Brandes 	for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++) {
3503a73dd098SShai Brandes 		stat_offset = ena_stats_srd_strings[stat].stat_offset;
3504a73dd098SShai Brandes 		xstats[count].id = count;
3505a73dd098SShai Brandes 		xstats[count].value = *((uint64_t *)
3506a73dd098SShai Brandes 			((char *)stats_begin + stat_offset));
3507a73dd098SShai Brandes 	}
3508a73dd098SShai Brandes 
35097830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
35107830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
35117830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
35127830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
35137830e905SSolganik Alexander 
35147830e905SSolganik Alexander 			xstats[count].id = count;
35157830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
35167830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
35177830e905SSolganik Alexander 		}
35187830e905SSolganik Alexander 	}
35197830e905SSolganik Alexander 
35207830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
35217830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
35227830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
35237830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
35247830e905SSolganik Alexander 
35257830e905SSolganik Alexander 			xstats[count].id = count;
35267830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
35277830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
35287830e905SSolganik Alexander 		}
35297830e905SSolganik Alexander 	}
35307830e905SSolganik Alexander 
35317830e905SSolganik Alexander 	return count;
35327830e905SSolganik Alexander }
35337830e905SSolganik Alexander 
35347830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
35357830e905SSolganik Alexander 				const uint64_t *ids,
35367830e905SSolganik Alexander 				uint64_t *values,
35377830e905SSolganik Alexander 				unsigned int n)
35387830e905SSolganik Alexander {
3539890728ffSStephen Hemminger 	struct ena_adapter *adapter = dev->data->dev_private;
35407830e905SSolganik Alexander 	uint64_t id;
35417830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
35427830e905SSolganik Alexander 	unsigned int i;
35437830e905SSolganik Alexander 	int qid;
35447830e905SSolganik Alexander 	int valid = 0;
354592401abfSShai Brandes 	bool were_metrics_copied = false;
3546a73dd098SShai Brandes 	bool was_srd_info_copied = false;
354792401abfSShai Brandes 	uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS];
3548a73dd098SShai Brandes 	struct ena_stats_srd srd_info = {0};
354945718adaSMichal Krawczyk 
35507830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
35517830e905SSolganik Alexander 		id = ids[i];
35527830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
35537830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
35547830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
35557830e905SSolganik Alexander 			++valid;
35567830e905SSolganik Alexander 			continue;
35577830e905SSolganik Alexander 		}
35587830e905SSolganik Alexander 
355945718adaSMichal Krawczyk 		/* Check if id belongs to customer metrics statistics */
35607830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
356192401abfSShai Brandes 		if (id < adapter->metrics_num) {
356292401abfSShai Brandes 			/* Avoid reading
metrics multiple times in a single
356345718adaSMichal Krawczyk 			 * function call, as it requires communication with the
356445718adaSMichal Krawczyk 			 * admin queue.
356545718adaSMichal Krawczyk 			 */
356692401abfSShai Brandes 			if (!were_metrics_copied) {
356792401abfSShai Brandes 				were_metrics_copied = true;
356892401abfSShai Brandes 				ena_copy_customer_metrics(adapter,
356992401abfSShai Brandes 							  metrics_stats,
357092401abfSShai Brandes 							  adapter->metrics_num);
357145718adaSMichal Krawczyk 			}
357292401abfSShai Brandes 
357392401abfSShai Brandes 			values[i] = *((uint64_t *)&metrics_stats + id);
357445718adaSMichal Krawczyk 			++valid;
357545718adaSMichal Krawczyk 			continue;
357645718adaSMichal Krawczyk 		}
357745718adaSMichal Krawczyk 
3578a73dd098SShai Brandes 		/* Check if id belongs to SRD info statistics */
357992401abfSShai Brandes 		id -= adapter->metrics_num;
3580a73dd098SShai Brandes 
3581a73dd098SShai Brandes 		if (id < ENA_STATS_ARRAY_ENA_SRD) {
3582a73dd098SShai Brandes 			/*
3583a73dd098SShai Brandes 			 * Avoid reading srd info multiple times in a single
3584a73dd098SShai Brandes 			 * function call, as it requires communication with the
3585a73dd098SShai Brandes 			 * admin queue.
3586a73dd098SShai Brandes 			 */
3587a73dd098SShai Brandes 			if (!was_srd_info_copied) {
3588a73dd098SShai Brandes 				was_srd_info_copied = true;
3589a73dd098SShai Brandes 				ena_copy_ena_srd_info(adapter, &srd_info);
3590a73dd098SShai Brandes 			}
3591a73dd098SShai Brandes 			values[i] = *((uint64_t *)&srd_info + id);
3592a73dd098SShai Brandes 			++valid;
3593a73dd098SShai Brandes 			continue;
3594a73dd098SShai Brandes 		}
3595a73dd098SShai Brandes 
3596a73dd098SShai Brandes 		/* Check if id belongs to rx queue statistics */
3597a73dd098SShai Brandes 		id -= ENA_STATS_ARRAY_ENA_SRD;
3598a73dd098SShai Brandes 
35997830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
36007830e905SSolganik Alexander 		if (id < rx_entries) {
36017830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
36027830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
36037830e905SSolganik Alexander 			values[i] = *((uint64_t *)
36047830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
36057830e905SSolganik Alexander 			++valid;
36067830e905SSolganik Alexander 			continue;
36077830e905SSolganik Alexander 		}
36087830e905SSolganik Alexander 		/* Check if id belongs to Tx queue statistics */
36097830e905SSolganik Alexander 		id -= rx_entries;
36107830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
36117830e905SSolganik Alexander 		if (id < tx_entries) {
36127830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
36137830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
36147830e905SSolganik Alexander 			values[i] = *((uint64_t *)
36157830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
36167830e905SSolganik Alexander 			++valid;
36177830e905SSolganik Alexander 			continue;
36187830e905SSolganik Alexander 		}
36197830e905SSolganik Alexander 	}
36207830e905SSolganik Alexander 
36217830e905SSolganik Alexander 	return valid;
36227830e905SSolganik Alexander }
36237830e905SSolganik Alexander 
3624cc0c5d25SMichal Krawczyk static int ena_process_uint_devarg(const char *key,
3625cc0c5d25SMichal Krawczyk 				   const char *value,
3626cc0c5d25SMichal Krawczyk 				   void *opaque)
3627cc0c5d25SMichal Krawczyk {
3628cc0c5d25SMichal Krawczyk 	struct ena_adapter *adapter = opaque;
3629cc0c5d25SMichal Krawczyk 	char *str_end;
3630cc0c5d25SMichal Krawczyk 	uint64_t uint_value;
3631cc0c5d25SMichal Krawczyk 
3632cc0c5d25SMichal Krawczyk 	uint_value
= strtoull(value, &str_end, 10); 3633cc0c5d25SMichal Krawczyk if (value == str_end) { 3634cc0c5d25SMichal Krawczyk PMD_INIT_LOG(ERR, 3635cc0c5d25SMichal Krawczyk "Invalid value for key '%s'. Only uint values are accepted.\n", 3636cc0c5d25SMichal Krawczyk key); 3637cc0c5d25SMichal Krawczyk return -EINVAL; 3638cc0c5d25SMichal Krawczyk } 3639cc0c5d25SMichal Krawczyk 3640cc0c5d25SMichal Krawczyk if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) { 3641cc0c5d25SMichal Krawczyk if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) { 3642cc0c5d25SMichal Krawczyk PMD_INIT_LOG(ERR, 3643cc0c5d25SMichal Krawczyk "Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n", 3644cc0c5d25SMichal Krawczyk uint_value, ENA_MAX_TX_TIMEOUT_SECONDS); 3645cc0c5d25SMichal Krawczyk return -EINVAL; 3646cc0c5d25SMichal Krawczyk } else if (uint_value == 0) { 3647cc0c5d25SMichal Krawczyk PMD_INIT_LOG(INFO, 3648cc0c5d25SMichal Krawczyk "Check for missing Tx completions has been disabled.\n"); 3649cc0c5d25SMichal Krawczyk adapter->missing_tx_completion_to = 3650cc0c5d25SMichal Krawczyk ENA_HW_HINTS_NO_TIMEOUT; 3651cc0c5d25SMichal Krawczyk } else { 3652cc0c5d25SMichal Krawczyk PMD_INIT_LOG(INFO, 3653cc0c5d25SMichal Krawczyk "Tx packet completion timeout set to %" PRIu64 " seconds.\n", 3654cc0c5d25SMichal Krawczyk uint_value); 3655cc0c5d25SMichal Krawczyk adapter->missing_tx_completion_to = 3656cc0c5d25SMichal Krawczyk uint_value * rte_get_timer_hz(); 3657cc0c5d25SMichal Krawczyk } 3658cc0c5d25SMichal Krawczyk } 3659cc0c5d25SMichal Krawczyk 3660cc0c5d25SMichal Krawczyk return 0; 3661cc0c5d25SMichal Krawczyk } 3662cc0c5d25SMichal Krawczyk 36638a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key, 36648a7a73f2SMichal Krawczyk const char *value, 36658a7a73f2SMichal Krawczyk void *opaque) 36668a7a73f2SMichal Krawczyk { 36678a7a73f2SMichal Krawczyk struct ena_adapter *adapter = opaque; 36688a7a73f2SMichal Krawczyk bool bool_value; 36698a7a73f2SMichal Krawczyk 36708a7a73f2SMichal Krawczyk /* Parse the value. */ 36718a7a73f2SMichal Krawczyk if (strcmp(value, "1") == 0) { 36728a7a73f2SMichal Krawczyk bool_value = true; 36738a7a73f2SMichal Krawczyk } else if (strcmp(value, "0") == 0) { 36748a7a73f2SMichal Krawczyk bool_value = false; 36758a7a73f2SMichal Krawczyk } else { 36768a7a73f2SMichal Krawczyk PMD_INIT_LOG(ERR, 36778a7a73f2SMichal Krawczyk "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n", 36788a7a73f2SMichal Krawczyk value, key); 36798a7a73f2SMichal Krawczyk return -EINVAL; 36808a7a73f2SMichal Krawczyk } 36818a7a73f2SMichal Krawczyk 36828a7a73f2SMichal Krawczyk /* Now, assign it to the proper adapter field. 
*/ 36839b312ad3SIgor Chauskin if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0) 36848a7a73f2SMichal Krawczyk adapter->use_large_llq_hdr = bool_value; 36859944919eSMichal Krawczyk else if (strcmp(key, ENA_DEVARG_ENABLE_LLQ) == 0) 36869944919eSMichal Krawczyk adapter->enable_llq = bool_value; 36878a7a73f2SMichal Krawczyk 36888a7a73f2SMichal Krawczyk return 0; 36898a7a73f2SMichal Krawczyk } 36908a7a73f2SMichal Krawczyk 36918a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter, 36928a7a73f2SMichal Krawczyk struct rte_devargs *devargs) 36938a7a73f2SMichal Krawczyk { 36948a7a73f2SMichal Krawczyk static const char * const allowed_args[] = { 36958a7a73f2SMichal Krawczyk ENA_DEVARG_LARGE_LLQ_HDR, 3696cc0c5d25SMichal Krawczyk ENA_DEVARG_MISS_TXC_TO, 36979944919eSMichal Krawczyk ENA_DEVARG_ENABLE_LLQ, 36989f220a95SMichal Krawczyk NULL, 36998a7a73f2SMichal Krawczyk }; 37008a7a73f2SMichal Krawczyk struct rte_kvargs *kvlist; 37018a7a73f2SMichal Krawczyk int rc; 37028a7a73f2SMichal Krawczyk 37038a7a73f2SMichal Krawczyk if (devargs == NULL) 37048a7a73f2SMichal Krawczyk return 0; 37058a7a73f2SMichal Krawczyk 37068a7a73f2SMichal Krawczyk kvlist = rte_kvargs_parse(devargs->args, allowed_args); 37078a7a73f2SMichal Krawczyk if (kvlist == NULL) { 37088a7a73f2SMichal Krawczyk PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", 37098a7a73f2SMichal Krawczyk devargs->args); 37108a7a73f2SMichal Krawczyk return -EINVAL; 37118a7a73f2SMichal Krawczyk } 37128a7a73f2SMichal Krawczyk 37138a7a73f2SMichal Krawczyk rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, 37148a7a73f2SMichal Krawczyk ena_process_bool_devarg, adapter); 3715cc0c5d25SMichal Krawczyk if (rc != 0) 3716cc0c5d25SMichal Krawczyk goto exit; 3717cc0c5d25SMichal Krawczyk rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO, 3718cc0c5d25SMichal Krawczyk ena_process_uint_devarg, adapter); 37199944919eSMichal Krawczyk if (rc != 0) 37209944919eSMichal Krawczyk goto exit; 37219944919eSMichal Krawczyk rc = rte_kvargs_process(kvlist, ENA_DEVARG_ENABLE_LLQ, 37229944919eSMichal Krawczyk ena_process_bool_devarg, adapter); 37238a7a73f2SMichal Krawczyk 3724cc0c5d25SMichal Krawczyk exit: 37258a7a73f2SMichal Krawczyk rte_kvargs_free(kvlist); 37268a7a73f2SMichal Krawczyk 37278a7a73f2SMichal Krawczyk return rc; 37288a7a73f2SMichal Krawczyk } 37298a7a73f2SMichal Krawczyk 37306986cdc4SMichal Krawczyk static int ena_setup_rx_intr(struct rte_eth_dev *dev) 37316986cdc4SMichal Krawczyk { 37326986cdc4SMichal Krawczyk struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3733d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 37346986cdc4SMichal Krawczyk int rc; 37356986cdc4SMichal Krawczyk uint16_t vectors_nb, i; 37366986cdc4SMichal Krawczyk bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq; 37376986cdc4SMichal Krawczyk 37386986cdc4SMichal Krawczyk if (!rx_intr_requested) 37396986cdc4SMichal Krawczyk return 0; 37406986cdc4SMichal Krawczyk 37416986cdc4SMichal Krawczyk if (!rte_intr_cap_multiple(intr_handle)) { 37426986cdc4SMichal Krawczyk PMD_DRV_LOG(ERR, 37436986cdc4SMichal Krawczyk "Rx interrupt requested, but it isn't supported by the PCI driver\n"); 37446986cdc4SMichal Krawczyk return -ENOTSUP; 37456986cdc4SMichal Krawczyk } 37466986cdc4SMichal Krawczyk 37476986cdc4SMichal Krawczyk /* Disable interrupt mapping before the configuration starts. 
 */
37486986cdc4SMichal Krawczyk 	rte_intr_disable(intr_handle);
37496986cdc4SMichal Krawczyk 
37506986cdc4SMichal Krawczyk 	/* Verify if there are enough vectors available. */
37516986cdc4SMichal Krawczyk 	vectors_nb = dev->data->nb_rx_queues;
37526986cdc4SMichal Krawczyk 	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
37536986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
37546986cdc4SMichal Krawczyk 			"Too many Rx interrupts requested, maximum number: %d\n",
37556986cdc4SMichal Krawczyk 			RTE_MAX_RXTX_INTR_VEC_ID);
37566986cdc4SMichal Krawczyk 		rc = -ENOTSUP;
37576986cdc4SMichal Krawczyk 		goto enable_intr;
37586986cdc4SMichal Krawczyk 	}
37596986cdc4SMichal Krawczyk 
3760d61138d4SHarman Kalra 	/* Allocate the vector list */
3761d61138d4SHarman Kalra 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
3762d61138d4SHarman Kalra 				    dev->data->nb_rx_queues)) {
37636986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
37646986cdc4SMichal Krawczyk 			"Failed to allocate interrupt vector for %d queues\n",
37656986cdc4SMichal Krawczyk 			dev->data->nb_rx_queues);
37666986cdc4SMichal Krawczyk 		rc = -ENOMEM;
37676986cdc4SMichal Krawczyk 		goto enable_intr;
37686986cdc4SMichal Krawczyk 	}
37696986cdc4SMichal Krawczyk 
37706986cdc4SMichal Krawczyk 	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
37716986cdc4SMichal Krawczyk 	if (rc != 0)
37726986cdc4SMichal Krawczyk 		goto free_intr_vec;
37736986cdc4SMichal Krawczyk 
37746986cdc4SMichal Krawczyk 	if (!rte_intr_allow_others(intr_handle)) {
37756986cdc4SMichal Krawczyk 		PMD_DRV_LOG(ERR,
37766986cdc4SMichal Krawczyk 			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
37776986cdc4SMichal Krawczyk 		rc = -ENOTSUP;
37786986cdc4SMichal Krawczyk 		goto disable_intr_efd;
37796986cdc4SMichal Krawczyk 	}
37806986cdc4SMichal Krawczyk 	for (i = 0; i < vectors_nb; ++i) {
3781d61138d4SHarman Kalra 		rc = rte_intr_vec_list_index_set(intr_handle, i,
3782d61138d4SHarman Kalra 					   RTE_INTR_VEC_RXTX_OFFSET + i);
3783d61138d4SHarman Kalra 		if (rc != 0)
3784d61138d4SHarman Kalra 			goto disable_intr_efd;
37856986cdc4SMichal Krawczyk 	}
37866986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
37876986cdc4SMichal Krawczyk 	return 0;
37886986cdc4SMichal Krawczyk disable_intr_efd:
37896986cdc4SMichal Krawczyk 	rte_intr_efd_disable(intr_handle);
37906986cdc4SMichal Krawczyk free_intr_vec:
3791d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
37926986cdc4SMichal Krawczyk enable_intr:
37936986cdc4SMichal Krawczyk 	rte_intr_enable(intr_handle);
37946986cdc4SMichal Krawczyk 	return rc;
37956986cdc4SMichal Krawczyk }
37966986cdc4SMichal Krawczyk 
37976986cdc4SMichal Krawczyk static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
37986986cdc4SMichal Krawczyk 				  uint16_t queue_id,
37996986cdc4SMichal Krawczyk 				  bool unmask)
38006986cdc4SMichal Krawczyk {
38016986cdc4SMichal Krawczyk 	struct ena_adapter *adapter = dev->data->dev_private;
38026986cdc4SMichal Krawczyk 	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
38036986cdc4SMichal Krawczyk 	struct ena_eth_io_intr_reg intr_reg;
38046986cdc4SMichal Krawczyk 
3805f73f53f7SShai Brandes 	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask, 1);
38066986cdc4SMichal Krawczyk 	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
38076986cdc4SMichal Krawczyk }
38086986cdc4SMichal Krawczyk 
38096986cdc4SMichal Krawczyk static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
38106986cdc4SMichal Krawczyk 				    uint16_t queue_id)
38116986cdc4SMichal Krawczyk {
38126986cdc4SMichal Krawczyk 	ena_rx_queue_intr_set(dev, queue_id, true);
38136986cdc4SMichal Krawczyk 
38146986cdc4SMichal Krawczyk 	return 0;
38156986cdc4SMichal
Krawczyk } 38166986cdc4SMichal Krawczyk 38176986cdc4SMichal Krawczyk static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 38186986cdc4SMichal Krawczyk uint16_t queue_id) 38196986cdc4SMichal Krawczyk { 38206986cdc4SMichal Krawczyk ena_rx_queue_intr_set(dev, queue_id, false); 38216986cdc4SMichal Krawczyk 38226986cdc4SMichal Krawczyk return 0; 38236986cdc4SMichal Krawczyk } 38246986cdc4SMichal Krawczyk 3825b9b05d6fSMichal Krawczyk static int ena_configure_aenq(struct ena_adapter *adapter) 3826b9b05d6fSMichal Krawczyk { 3827b9b05d6fSMichal Krawczyk uint32_t aenq_groups = adapter->all_aenq_groups; 3828b9b05d6fSMichal Krawczyk int rc; 3829b9b05d6fSMichal Krawczyk 3830b9b05d6fSMichal Krawczyk /* All_aenq_groups holds all AENQ functions supported by the device and 3831b9b05d6fSMichal Krawczyk * the HW, so at first we need to be sure the LSC request is valid. 3832b9b05d6fSMichal Krawczyk */ 3833b9b05d6fSMichal Krawczyk if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) { 3834b9b05d6fSMichal Krawczyk if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) { 3835b9b05d6fSMichal Krawczyk PMD_DRV_LOG(ERR, 3836b9b05d6fSMichal Krawczyk "LSC requested, but it's not supported by the AENQ\n"); 3837b9b05d6fSMichal Krawczyk return -EINVAL; 3838b9b05d6fSMichal Krawczyk } 3839b9b05d6fSMichal Krawczyk } else { 3840b9b05d6fSMichal Krawczyk /* If LSC wasn't enabled by the app, let's enable all supported 3841b9b05d6fSMichal Krawczyk * AENQ procedures except the LSC. 3842b9b05d6fSMichal Krawczyk */ 3843b9b05d6fSMichal Krawczyk aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE); 3844b9b05d6fSMichal Krawczyk } 3845b9b05d6fSMichal Krawczyk 3846b9b05d6fSMichal Krawczyk rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups); 3847b9b05d6fSMichal Krawczyk if (rc != 0) { 3848b9b05d6fSMichal Krawczyk PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc); 3849b9b05d6fSMichal Krawczyk return rc; 3850b9b05d6fSMichal Krawczyk } 3851b9b05d6fSMichal Krawczyk 3852b9b05d6fSMichal Krawczyk adapter->active_aenq_groups = aenq_groups; 3853b9b05d6fSMichal Krawczyk 3854b9b05d6fSMichal Krawczyk return 0; 3855b9b05d6fSMichal Krawczyk } 3856b9b05d6fSMichal Krawczyk 3857e3595539SStanislaw Kardach int ena_mp_indirect_table_set(struct ena_adapter *adapter) 3858e3595539SStanislaw Kardach { 3859e3595539SStanislaw Kardach return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev); 3860e3595539SStanislaw Kardach } 3861e3595539SStanislaw Kardach 3862e3595539SStanislaw Kardach int ena_mp_indirect_table_get(struct ena_adapter *adapter, 3863e3595539SStanislaw Kardach uint32_t *indirect_table) 3864e3595539SStanislaw Kardach { 3865e3595539SStanislaw Kardach return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev, 3866e3595539SStanislaw Kardach indirect_table); 3867e3595539SStanislaw Kardach } 3868e3595539SStanislaw Kardach 3869ca148440SMichal Krawczyk /********************************************************************* 3870850e1bb1SMichal Krawczyk * ena_plat_dpdk.h functions implementations 3871850e1bb1SMichal Krawczyk *********************************************************************/ 3872850e1bb1SMichal Krawczyk 3873850e1bb1SMichal Krawczyk const struct rte_memzone * 3874850e1bb1SMichal Krawczyk ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size, 3875850e1bb1SMichal Krawczyk int socket_id, unsigned int alignment, void **virt_addr, 3876850e1bb1SMichal Krawczyk dma_addr_t *phys_addr) 3877850e1bb1SMichal Krawczyk { 3878850e1bb1SMichal Krawczyk char z_name[RTE_MEMZONE_NAMESIZE]; 
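	/*
	 * Worked example (illustrative): the third coherent allocation made
	 * on port 1 gets the memzone name "ena_p1_mz2", since memzone_cnt
	 * below is a per-adapter running counter that is appended to keep
	 * every rte_memzone name unique.
	 */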
3879850e1bb1SMichal Krawczyk struct ena_adapter *adapter = data->dev_private; 3880850e1bb1SMichal Krawczyk const struct rte_memzone *memzone; 3881850e1bb1SMichal Krawczyk int rc; 3882850e1bb1SMichal Krawczyk 3883850e1bb1SMichal Krawczyk rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "", 3884850e1bb1SMichal Krawczyk data->port_id, adapter->memzone_cnt); 3885850e1bb1SMichal Krawczyk if (rc >= RTE_MEMZONE_NAMESIZE) { 3886850e1bb1SMichal Krawczyk PMD_DRV_LOG(ERR, 3887850e1bb1SMichal Krawczyk "Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n", 3888850e1bb1SMichal Krawczyk data->port_id, adapter->memzone_cnt); 3889850e1bb1SMichal Krawczyk goto error; 3890850e1bb1SMichal Krawczyk } 3891850e1bb1SMichal Krawczyk adapter->memzone_cnt++; 3892850e1bb1SMichal Krawczyk 3893850e1bb1SMichal Krawczyk memzone = rte_memzone_reserve_aligned(z_name, size, socket_id, 3894850e1bb1SMichal Krawczyk RTE_MEMZONE_IOVA_CONTIG, alignment); 3895850e1bb1SMichal Krawczyk if (memzone == NULL) { 3896850e1bb1SMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n", 3897850e1bb1SMichal Krawczyk z_name); 3898850e1bb1SMichal Krawczyk goto error; 3899850e1bb1SMichal Krawczyk } 3900850e1bb1SMichal Krawczyk 3901850e1bb1SMichal Krawczyk memset(memzone->addr, 0, size); 3902850e1bb1SMichal Krawczyk *virt_addr = memzone->addr; 3903850e1bb1SMichal Krawczyk *phys_addr = memzone->iova; 3904850e1bb1SMichal Krawczyk 3905850e1bb1SMichal Krawczyk return memzone; 3906850e1bb1SMichal Krawczyk 3907850e1bb1SMichal Krawczyk error: 3908850e1bb1SMichal Krawczyk *virt_addr = NULL; 3909850e1bb1SMichal Krawczyk *phys_addr = 0; 3910850e1bb1SMichal Krawczyk 3911850e1bb1SMichal Krawczyk return NULL; 3912850e1bb1SMichal Krawczyk } 3913850e1bb1SMichal Krawczyk 3914850e1bb1SMichal Krawczyk 3915850e1bb1SMichal Krawczyk /********************************************************************* 3916ca148440SMichal Krawczyk * PMD configuration 3917ca148440SMichal Krawczyk *********************************************************************/ 3918fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3919fdf91e0fSJan Blunck struct rte_pci_device *pci_dev) 3920fdf91e0fSJan Blunck { 3921fdf91e0fSJan Blunck return rte_eth_dev_pci_generic_probe(pci_dev, 3922fdf91e0fSJan Blunck sizeof(struct ena_adapter), eth_ena_dev_init); 3923fdf91e0fSJan Blunck } 3924fdf91e0fSJan Blunck 3925fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 3926fdf91e0fSJan Blunck { 3927eb0ef49dSMichal Krawczyk return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 3928fdf91e0fSJan Blunck } 3929fdf91e0fSJan Blunck 3930fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = { 39311173fca2SJan Medala .id_table = pci_id_ena_map, 393205e0eee0SRafal Kozik .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 393305e0eee0SRafal Kozik RTE_PCI_DRV_WC_ACTIVATE, 3934fdf91e0fSJan Blunck .probe = eth_ena_pci_probe, 3935fdf91e0fSJan Blunck .remove = eth_ena_pci_remove, 39361173fca2SJan Medala }; 39371173fca2SJan Medala 3938fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 393901f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 394006e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 39419944919eSMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena, 39429944919eSMichal Krawczyk ENA_DEVARG_LARGE_LLQ_HDR "=<0|1> " 39439944919eSMichal Krawczyk ENA_DEVARG_ENABLE_LLQ 
"=<0|1> " 39449944919eSMichal Krawczyk ENA_DEVARG_MISS_TXC_TO "=<uint>"); 3945eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE); 3946eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE); 39470a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX 39480a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG); 39496f1c9df9SStephen Hemminger #endif 39500a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_TX 39510a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG); 39526f1c9df9SStephen Hemminger #endif 39530a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING); 39543adcba9aSMichal Krawczyk 39553adcba9aSMichal Krawczyk /****************************************************************************** 39563adcba9aSMichal Krawczyk ******************************** AENQ Handlers ******************************* 39573adcba9aSMichal Krawczyk *****************************************************************************/ 3958ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data, 3959ca148440SMichal Krawczyk struct ena_admin_aenq_entry *aenq_e) 3960ca148440SMichal Krawczyk { 3961aab58857SStanislaw Kardach struct rte_eth_dev *eth_dev = adapter_data; 3962aab58857SStanislaw Kardach struct ena_adapter *adapter = eth_dev->data->dev_private; 3963ca148440SMichal Krawczyk struct ena_admin_aenq_link_change_desc *aenq_link_desc; 3964ca148440SMichal Krawczyk uint32_t status; 3965ca148440SMichal Krawczyk 3966ca148440SMichal Krawczyk aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 3967ca148440SMichal Krawczyk 3968ca148440SMichal Krawczyk status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 3969ca148440SMichal Krawczyk adapter->link_status = status; 3970ca148440SMichal Krawczyk 3971ca148440SMichal Krawczyk ena_link_update(eth_dev, 0); 39725723fbedSFerruh Yigit rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 3973ca148440SMichal Krawczyk } 3974ca148440SMichal Krawczyk 3975aab58857SStanislaw Kardach static void ena_notification(void *adapter_data, 3976f01f060cSRafal Kozik struct ena_admin_aenq_entry *aenq_e) 3977f01f060cSRafal Kozik { 3978aab58857SStanislaw Kardach struct rte_eth_dev *eth_dev = adapter_data; 3979aab58857SStanislaw Kardach struct ena_adapter *adapter = eth_dev->data->dev_private; 3980f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints; 3981f01f060cSRafal Kozik 3982f01f060cSRafal Kozik if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 3983617898d1SMichal Krawczyk PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. 
Expected: %x\n",
3984f01f060cSRafal Kozik 			aenq_e->aenq_common_desc.group,
3985f01f060cSRafal Kozik 			ENA_ADMIN_NOTIFICATION);
3986f01f060cSRafal Kozik 
3987b19f366cSMichal Krawczyk 	switch (aenq_e->aenq_common_desc.syndrome) {
3988f01f060cSRafal Kozik 	case ENA_ADMIN_UPDATE_HINTS:
3989f01f060cSRafal Kozik 		hints = (struct ena_admin_ena_hw_hints *)
3990f01f060cSRafal Kozik 			(&aenq_e->inline_data_w4);
3991f01f060cSRafal Kozik 		ena_update_hints(adapter, hints);
3992f01f060cSRafal Kozik 		break;
3993f01f060cSRafal Kozik 	default:
3994617898d1SMichal Krawczyk 		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
3995b19f366cSMichal Krawczyk 			aenq_e->aenq_common_desc.syndrome);
3996f01f060cSRafal Kozik 	}
3997f01f060cSRafal Kozik }
3998f01f060cSRafal Kozik 
3999d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
4000d9b8b106SMichal Krawczyk 			   struct ena_admin_aenq_entry *aenq_e)
4001d9b8b106SMichal Krawczyk {
4002aab58857SStanislaw Kardach 	struct rte_eth_dev *eth_dev = adapter_data;
4003aab58857SStanislaw Kardach 	struct ena_adapter *adapter = eth_dev->data->dev_private;
400494c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
400594c3e376SRafal Kozik 	uint64_t rx_drops;
4006e1e73e32SMichal Krawczyk 	uint64_t tx_drops;
400777d4ed30SShai Brandes 	uint64_t rx_overruns;
4008d9b8b106SMichal Krawczyk 
4009d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
401094c3e376SRafal Kozik 
401194c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
401294c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
4013e1e73e32SMichal Krawczyk 	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
401477d4ed30SShai Brandes 	rx_overruns = ((uint64_t)desc->rx_overruns_high << 32) | desc->rx_overruns_low;
4015e1e73e32SMichal Krawczyk 
4016*dae29f75SShai Brandes 	/*
4017*dae29f75SShai Brandes 	 * Depending on its acceleration support, the device updates a different statistic when
4018*dae29f75SShai Brandes 	 * an Rx packet is dropped because there are no available buffers to accommodate it.
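	 * Summing the two counters below therefore reports the total number
	 * of Rx packets dropped for lack of buffers, whichever of them the
	 * device actually maintains.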
4019*dae29f75SShai Brandes 	 */
4020*dae29f75SShai Brandes 	adapter->drv_stats->rx_drops = rx_drops + rx_overruns;
4021e1e73e32SMichal Krawczyk 	adapter->dev_stats.tx_drops = tx_drops;
4022d9b8b106SMichal Krawczyk }
4023d9b8b106SMichal Krawczyk 
40243adcba9aSMichal Krawczyk /**
40253adcba9aSMichal Krawczyk  * This handler will be called for an unknown event group or unimplemented handlers
40263adcba9aSMichal Krawczyk  **/
40273adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
40283adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
40293adcba9aSMichal Krawczyk {
4030617898d1SMichal Krawczyk 	PMD_DRV_LOG(ERR,
4031617898d1SMichal Krawczyk 		"Unknown event was received or event with unimplemented handler\n");
40323adcba9aSMichal Krawczyk }
40333adcba9aSMichal Krawczyk 
4034ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
40353adcba9aSMichal Krawczyk 	.handlers = {
4036ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4037f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
4038d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
40393adcba9aSMichal Krawczyk 	},
40403adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
40413adcba9aSMichal Krawczyk };
4042e3595539SStanislaw Kardach 
4043e3595539SStanislaw Kardach /*********************************************************************
4044e3595539SStanislaw Kardach  *  Multi-Process communication request handling (in primary)
4045e3595539SStanislaw Kardach  *********************************************************************/
4046e3595539SStanislaw Kardach static int
4047e3595539SStanislaw Kardach ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
4048e3595539SStanislaw Kardach {
4049e3595539SStanislaw Kardach 	const struct ena_mp_body *req =
4050e3595539SStanislaw Kardach 		(const struct ena_mp_body *)mp_msg->param;
4051e3595539SStanislaw Kardach 	struct ena_adapter *adapter;
4052e3595539SStanislaw Kardach 	struct ena_com_dev *ena_dev;
4053e3595539SStanislaw Kardach 	struct ena_mp_body *rsp;
4054e3595539SStanislaw Kardach 	struct rte_mp_msg mp_rsp;
4055e3595539SStanislaw Kardach 	struct rte_eth_dev *dev;
4056e3595539SStanislaw Kardach 	int res = 0;
4057e3595539SStanislaw Kardach 
4058e3595539SStanislaw Kardach 	rsp = (struct ena_mp_body *)&mp_rsp.param;
4059e3595539SStanislaw Kardach 	mp_msg_init(&mp_rsp, req->type, req->port_id);
4060e3595539SStanislaw Kardach 
4061e3595539SStanislaw Kardach 	if (!rte_eth_dev_is_valid_port(req->port_id)) {
4062e3595539SStanislaw Kardach 		rte_errno = ENODEV;
4063e3595539SStanislaw Kardach 		res = -rte_errno;
4064e3595539SStanislaw Kardach 		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
4065e3595539SStanislaw Kardach 			req->port_id, req->type);
4066e3595539SStanislaw Kardach 		goto end;
4067e3595539SStanislaw Kardach 	}
4068e3595539SStanislaw Kardach 	dev = &rte_eth_devices[req->port_id];
4069e3595539SStanislaw Kardach 	adapter = dev->data->dev_private;
4070e3595539SStanislaw Kardach 	ena_dev = &adapter->ena_dev;
4071e3595539SStanislaw Kardach 
4072e3595539SStanislaw Kardach 	switch (req->type) {
4073e3595539SStanislaw Kardach 	case ENA_MP_DEV_STATS_GET:
4074e3595539SStanislaw Kardach 		res = ena_com_get_dev_basic_stats(ena_dev,
4075e3595539SStanislaw Kardach 						  &adapter->basic_stats);
4076e3595539SStanislaw Kardach 		break;
4077e3595539SStanislaw Kardach 	case ENA_MP_ENI_STATS_GET:
4078e3595539SStanislaw Kardach 		res = ena_com_get_eni_stats(ena_dev,
407992401abfSShai Brandes 			(struct
ena_admin_eni_stats *)&adapter->metrics_stats); 4080e3595539SStanislaw Kardach break; 4081e3595539SStanislaw Kardach case ENA_MP_MTU_SET: 4082e3595539SStanislaw Kardach res = ena_com_set_dev_mtu(ena_dev, req->args.mtu); 4083e3595539SStanislaw Kardach break; 4084e3595539SStanislaw Kardach case ENA_MP_IND_TBL_GET: 4085e3595539SStanislaw Kardach res = ena_com_indirect_table_get(ena_dev, 4086e3595539SStanislaw Kardach adapter->indirect_table); 4087e3595539SStanislaw Kardach break; 4088e3595539SStanislaw Kardach case ENA_MP_IND_TBL_SET: 4089e3595539SStanislaw Kardach res = ena_com_indirect_table_set(ena_dev); 4090e3595539SStanislaw Kardach break; 409192401abfSShai Brandes case ENA_MP_CUSTOMER_METRICS_GET: 409292401abfSShai Brandes res = ena_com_get_customer_metrics(ena_dev, 409392401abfSShai Brandes (char *)adapter->metrics_stats, 4094980d0ba4SShai Brandes adapter->metrics_num * sizeof(uint64_t)); 409592401abfSShai Brandes break; 4096a73dd098SShai Brandes case ENA_MP_SRD_STATS_GET: 4097a73dd098SShai Brandes res = ena_com_get_ena_srd_info(ena_dev, 4098a73dd098SShai Brandes (struct ena_admin_ena_srd_info *)&adapter->srd_stats); 4099a73dd098SShai Brandes break; 4100e3595539SStanislaw Kardach default: 4101e3595539SStanislaw Kardach PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type); 4102e3595539SStanislaw Kardach res = -EINVAL; 4103e3595539SStanislaw Kardach break; 4104e3595539SStanislaw Kardach } 4105e3595539SStanislaw Kardach 4106e3595539SStanislaw Kardach end: 4107e3595539SStanislaw Kardach /* Save processing result in the reply */ 4108e3595539SStanislaw Kardach rsp->result = res; 4109e3595539SStanislaw Kardach /* Return just IPC processing status */ 4110e3595539SStanislaw Kardach return rte_mp_reply(&mp_rsp, peer); 4111e3595539SStanislaw Kardach } 4112
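/*
 * Illustrative, application-side sketch (not part of the driver): reading
 * the statistics exposed by the xstats callbacks above through the generic
 * ethdev API. Error handling is kept minimal; dump_ena_xstats() is a
 * hypothetical helper name.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <inttypes.h>
 *	#include <rte_ethdev.h>
 *
 *	static void dump_ena_xstats(uint16_t port_id)
 *	{
 *		// First call with NULL learns the required array size.
 *		int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *		if (nb <= 0)
 *			return;
 *
 *		struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *		struct rte_eth_xstat *stats = calloc(nb, sizeof(*stats));
 *
 *		if (names != NULL && stats != NULL &&
 *		    rte_eth_xstats_get_names(port_id, names, nb) == nb &&
 *		    rte_eth_xstats_get(port_id, stats, nb) == nb) {
 *			for (int i = 0; i < nb; i++)
 *				printf("%s: %" PRIu64 "\n",
 *				       names[stats[i].id].name, stats[i].value);
 *		}
 *		free(names);
 *		free(stats);
 *	}
 */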