/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	5
#define DRV_MODULE_VER_SUBMINOR	0

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)

#define GET_L4_HDR_LEN(mbuf) \
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
/* Timeout in seconds after which a single uncompleted Tx packet should be
 * considered as missing.
 */
#define ENA_DEVARG_MISS_TXC_TO "miss_txc_to"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append it to the name.
 */
rte_atomic64_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
	ENA_STAT_TX_ENTRY(missed_tx),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(l3_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_good),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
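/*
 * Illustrative sketch only (an assumption about how the tables above are
 * consumed by the xstats callbacks declared later in this file, not code
 * taken from them): each entry pairs a counter name with its byte offset
 * inside the matching stats structure, so a value can be read generically.
 * "ring" below stands for a hypothetical struct ena_ring pointer:
 *
 *	const struct ena_stats *e = &ena_stats_tx_strings[i];
 *	uint64_t v = *(const uint64_t *)((const char *)&ring->tx_stats +
 *					 e->stat_offset);
 */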
#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
	RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
	RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
	RTE_MBUF_F_TX_IP_CKSUM |\
	RTE_MBUF_F_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define ENA_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_L4_MASK | \
	RTE_MBUF_F_TX_IPV6 | \
	RTE_MBUF_F_TX_IPV4 | \
	RTE_MBUF_F_TX_IP_CKSUM | \
	RTE_MBUF_F_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

/** HW specific offloads capabilities. */
/* IPv4 checksum offload. */
#define ENA_L3_IPV4_CSUM		0x0001
/* TCP/UDP checksum offload for IPv4 packets. */
#define ENA_L4_IPV4_CSUM		0x0002
/* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
#define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
/* TCP/UDP checksum offload for IPv6 packets. */
#define ENA_L4_IPV6_CSUM		0x0008
/* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
#define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
/* TSO support for IPv4 packets. */
#define ENA_IPV4_TSO			0x0020

/* Device supports setting RSS hash. */
#define ENA_RX_RSS_HASH			0x0040

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_adapter *adapter,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
				      const uint64_t *ids,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned int size);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static int ena_copy_eni_stats(struct ena_adapter *adapter,
			      struct ena_stats_eni *stats);
static int ena_setup_rx_intr(struct rte_eth_dev *dev);
static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ena_configure_aenq(struct ena_adapter *adapter);
static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
				 const void *peer);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure          = ena_dev_configure,
	.dev_infos_get          = ena_infos_get,
	.rx_queue_setup         = ena_rx_queue_setup,
	.tx_queue_setup         = ena_tx_queue_setup,
	.dev_start              = ena_start,
	.dev_stop               = ena_stop,
	.link_update            = ena_link_update,
	.stats_get              = ena_stats_get,
	.xstats_get_names       = ena_xstats_get_names,
	.xstats_get_names_by_id = ena_xstats_get_names_by_id,
	.xstats_get             = ena_xstats_get,
	.xstats_get_by_id       = ena_xstats_get_by_id,
	.mtu_set                = ena_mtu_set,
	.rx_queue_release       = ena_rx_queue_release,
	.tx_queue_release       = ena_tx_queue_release,
	.dev_close              = ena_close,
	.dev_reset              = ena_dev_reset,
	.reta_update            = ena_rss_reta_update,
	.reta_query             = ena_rss_reta_query,
	.rx_queue_intr_enable   = ena_rx_queue_intr_enable,
	.rx_queue_intr_disable  = ena_rx_queue_intr_disable,
	.rss_hash_update        = ena_rss_hash_update,
	.rss_hash_conf_get      = ena_rss_hash_conf_get,
	.tx_done_cleanup        = ena_tx_cleanup,
};
/*********************************************************************
 *  Multi-Process communication bits
 *********************************************************************/
/* rte_mp IPC message name */
#define ENA_MP_NAME	"net_ena_mp"
/* Request timeout in seconds */
#define ENA_MP_REQ_TMO	5

/** Proxy request type */
enum ena_mp_req {
	ENA_MP_DEV_STATS_GET,
	ENA_MP_ENI_STATS_GET,
	ENA_MP_MTU_SET,
	ENA_MP_IND_TBL_GET,
	ENA_MP_IND_TBL_SET
};

/** Proxy message body. Shared between requests and responses. */
struct ena_mp_body {
	/* Message type */
	enum ena_mp_req type;
	int port_id;
	/* Processing result. Set in replies. 0 if message succeeded, negative
	 * error code otherwise.
	 */
	int result;
	union {
		int mtu; /* For ENA_MP_MTU_SET */
	} args;
};

/**
 * Initialize IPC message.
 *
 * @param[out] msg
 *   Pointer to the message to initialize.
 * @param[in] type
 *   Message type.
 * @param[in] port_id
 *   Port ID of target device.
 *
 */
static void
mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id)
{
	struct ena_mp_body *body = (struct ena_mp_body *)&msg->param;

	memset(msg, 0, sizeof(*msg));
	strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name));
	msg->len_param = sizeof(*body);
	body->type = type;
	body->port_id = port_id;
}

/*********************************************************************
 *  Multi-Process communication PMD API
 *********************************************************************/
/**
 * Define proxy request descriptor
 *
 * Used to define all structures and functions required for proxying a given
 * function to the primary process, including the code needed to prepare the
 * request and process the response.
 *
 * @param[in] f
 *   Name of the function to proxy
 * @param[in] t
 *   Message type to use
 * @param[in] prep
 *   Body of a function to prepare the request in form of a statement
 *   expression. It is passed all the original function arguments along with
 *   two extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *req - body of a request to prepare.
 * @param[in] proc
 *   Body of a function to process the response in form of a statement
 *   expression. It is passed all the original function arguments along with
 *   two extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *rsp - body of a response to process.
 * @param ...
 *   Proxied function's arguments
 *
 * @note Inside prep and proc any parameters which aren't used should be marked
 *       as such (with ENA_TOUCH or __rte_unused).
 */
#define ENA_PROXY_DESC(f, t, prep, proc, ...)			\
	static const enum ena_mp_req mp_type_ ## f = t;		\
	static const char *mp_name_ ## f = #t;			\
	static void mp_prep_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *req,	\
				  __VA_ARGS__)			\
	{							\
		prep;						\
	}							\
	static void mp_proc_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *rsp,	\
				  __VA_ARGS__)			\
	{							\
		proc;						\
	}

/**
 * Proxy wrapper for calling primary functions in a secondary process.
 *
 * Depending on whether called in primary or secondary process, calls the
 * @p func directly or proxies the call to the primary process via rte_mp IPC.
 * This macro requires a proxy request descriptor to be defined for @p func
 * using ENA_PROXY_DESC() macro.
 *
 * @param[in/out] a
 *   Device PMD data. Used for sending the message and sharing message results
 *   between primary and secondary.
 * @param[in] f
 *   Function to proxy.
 * @param ...
 *   Arguments of @p func.
 *
 * @return
 *   - 0: Processing succeeded and response handler was called.
 *   - -EPERM: IPC is unavailable on this platform. This means only primary
 *             process may call the proxied function.
 *   - -EIO: IPC returned error on request send. Inspect rte_errno for the
 *           detailed error code.
 *   - Negative error code from the proxied function.
 *
 * @note This mechanism is geared towards control-path tasks. Avoid calling it
 *       in fast-path unless unbound delays are allowed. This is due to the IPC
 *       mechanism itself (socket based).
 * @note Due to IPC parameter size limitations the proxy logic shares call
 *       results through the struct ena_adapter shared memory. This makes the
 *       proxy mechanism strictly single-threaded. Therefore be sure to make
 *       all calls to the same proxied function under the same lock.
 */
#define ENA_PROXY(a, f, ...)						\
({									\
	struct ena_adapter *_a = (a);					\
	struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO };		\
	struct ena_mp_body *req, *rsp;					\
	struct rte_mp_reply mp_rep;					\
	struct rte_mp_msg mp_req;					\
	int ret;							\
									\
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {		\
		ret = f(__VA_ARGS__);					\
	} else {							\
		/* Prepare and send request */				\
		req = (struct ena_mp_body *)&mp_req.param;		\
		mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \
		mp_prep_ ## f(_a, req, ## __VA_ARGS__);			\
									\
		ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);	\
		if (likely(!ret)) {					\
			RTE_ASSERT(mp_rep.nb_received == 1);		\
			rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \
			ret = rsp->result;				\
			if (ret == 0) {					\
				mp_proc_##f(_a, rsp, ## __VA_ARGS__);	\
			} else {					\
				PMD_DRV_LOG(ERR,			\
					    "%s returned error: %d\n",	\
					    mp_name_ ## f, rsp->result);\
			}						\
			free(mp_rep.msgs);				\
		} else if (rte_errno == ENOTSUP) {			\
			PMD_DRV_LOG(ERR,				\
				    "No IPC, can't proxy to primary\n");\
			ret = -rte_errno;				\
		} else {						\
			PMD_DRV_LOG(ERR, "Request %s failed: %s\n",	\
				    mp_name_ ## f,			\
				    rte_strerror(rte_errno));		\
			ret = -EIO;					\
		}							\
	}								\
	ret;								\
})
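/*
 * Illustrative usage sketch (not a definitive part of the driver flow): with
 * the ena_com_set_dev_mtu descriptor defined below, a caller in either
 * process can issue the request as if it were a direct call:
 *
 *	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
 *	if (unlikely(rc))
 *		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", rc);
 *
 * In the primary process this expands to a direct ena_com_set_dev_mtu()
 * call; in a secondary process the request travels over rte_mp IPC and the
 * generated mp_proc_ena_com_set_dev_mtu() handler consumes the reply.
 */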
/*********************************************************************
 *  Multi-Process communication request descriptors
 *********************************************************************/

ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != &adapter->basic_stats)
		rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats);

ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != (struct ena_admin_eni_stats *)&adapter->eni_stats)
		rte_memcpy(stats, &adapter->eni_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats);

ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(ena_dev);
	req->args.mtu = mtu;
}),
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(mtu);
}),
	struct ena_com_dev *ena_dev, int mtu);

ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
}),
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
}),
	struct ena_com_dev *ena_dev);

ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET,
({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(ind_tbl);
}),
({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (ind_tbl != adapter->indirect_table)
		rte_memcpy(ind_tbl, adapter->indirect_table,
			   sizeof(adapter->indirect_table));
}),
	struct ena_com_dev *ena_dev, u32 *ind_tbl);
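/* Record the reset reason and request a device reset. Only the first request
 * is honored, so the original failure reason is preserved until the reset
 * actually takes place.
 */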
static inline void ena_trigger_reset(struct ena_adapter *adapter,
				     enum ena_regs_reset_reason_types reason)
{
	if (likely(!adapter->trigger_reset)) {
		adapter->reset_reason = reason;
		adapter->trigger_reset = true;
	}
}

static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
				       struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx,
				       bool fill_hash)
{
	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err)) {
			++rx_stats->l3_csum_bad;
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		} else {
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		}
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
	} else {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			++rx_stats->l4_csum_bad;
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		} else {
			++rx_stats->l4_csum_good;
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		}
	}

	if (fill_hash &&
	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mbuf->hash.rss = ena_rx_ctx->hash;
	}

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}
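/* Translate mbuf Tx offload request flags into the ena_com Tx context: TSO,
 * L3/L4 checksum offloads and the don't-fragment bit, limited to what the
 * queue was configured to offload. When metadata caching is disabled, the
 * metadata descriptor is sent with every packet.
 */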
static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
				RTE_MBUF_F_TX_UDP_CKSUM) &&
				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}
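/* Check that a completed Tx request ID refers to a ring entry that still owns
 * an mbuf. On an invalid ID the bad_req_id statistic is bumped and a device
 * reset is requested.
 */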
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf\n");
	else
		PMD_TX_LOG(ERR, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
{
	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}
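/* Allocate the host debug area and register it with the device. It is sized
 * to hold one ETH_GSTRING_LEN name and one 64-bit value for every xstat
 * reported by the current configuration.
 */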
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->edev_data);

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     dev);

	/*
	 * MAC is not allocated dynamically. Setting it to NULL prevents the
	 * resource from being released in rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}
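/* Reset the device by destroying it and running the init sequence again.
 * Allowed only in the primary process, since memory cannot be released from
 * a secondary process.
 */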
static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	/* Cannot release memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
		return -EPERM;
	}

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(dev, i);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(dev, i);
}

static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->rx_queues[qid];

	/* Free ring resources */
	rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->tx_queues[qid];

	/* Free ring resources */
	rte_free(ring->push_buf_intermediate_buf);

	rte_free(ring->tx_buffer_info);

	rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
		ring->port_id, ring->id);
}
static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf) {
			rte_pktmbuf_free(tx_buf->mbuf);
			tx_buf->mbuf = NULL;
		}
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ?
		RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	return 0;
}
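/* Start every configured queue of the given ring type. If any queue fails to
 * start, the queues started so far are stopped before the error is returned.
 */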
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of Rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of Tx queues\n");
			}

			rc = ena_queue_start(dev, &queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					"Failed to start queue[%d] of type(%d)\n",
					i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t mtu = adapter->edev_data->mtu;

	if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR,
			"Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
			mtu, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}
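/* Derive the maximum Rx/Tx queue depths and scatter-gather list sizes from
 * the device feature context, capping the Tx depth by the LLQ depth when the
 * device placement (push-mode) policy is in use.
 */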
static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);
if (use_large_llq_hdr) { 10448a7a73f2SMichal Krawczyk if ((llq->entry_size_ctrl_supported & 10458a7a73f2SMichal Krawczyk ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 10468a7a73f2SMichal Krawczyk (ena_dev->tx_mem_queue_type == 10478a7a73f2SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV)) { 10488a7a73f2SMichal Krawczyk max_tx_queue_size /= 2; 10498a7a73f2SMichal Krawczyk PMD_INIT_LOG(INFO, 1050617898d1SMichal Krawczyk "Forcing large headers and decreasing maximum Tx queue size to %d\n", 10518a7a73f2SMichal Krawczyk max_tx_queue_size); 10528a7a73f2SMichal Krawczyk } else { 10538a7a73f2SMichal Krawczyk PMD_INIT_LOG(ERR, 10548a7a73f2SMichal Krawczyk "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 10558a7a73f2SMichal Krawczyk } 10568a7a73f2SMichal Krawczyk } 10578a7a73f2SMichal Krawczyk 10585920d930SMichal Krawczyk if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 1059617898d1SMichal Krawczyk PMD_INIT_LOG(ERR, "Invalid queue size\n"); 10601173fca2SJan Medala return -EFAULT; 10611173fca2SJan Medala } 10621173fca2SJan Medala 10635920d930SMichal Krawczyk ctx->max_tx_queue_size = max_tx_queue_size; 10645920d930SMichal Krawczyk ctx->max_rx_queue_size = max_rx_queue_size; 10652061fe41SRafal Kozik 1066ea93d37eSRafal Kozik return 0; 10671173fca2SJan Medala } 10681173fca2SJan Medala 10691173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev) 10701173fca2SJan Medala { 1071890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 10721173fca2SJan Medala 10731173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->ierrors); 10741173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->oerrors); 10751173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 1076e1e73e32SMichal Krawczyk adapter->drv_stats->rx_drops = 0; 10771173fca2SJan Medala } 10781173fca2SJan Medala 1079d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev, 10801173fca2SJan Medala struct rte_eth_stats *stats) 10811173fca2SJan Medala { 10821173fca2SJan Medala struct ena_admin_basic_stats ena_stats; 1083890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 10841173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 10851173fca2SJan Medala int rc; 108645b6d861SMichal Krawczyk int i; 108745b6d861SMichal Krawczyk int max_rings_stats; 10881173fca2SJan Medala 10891173fca2SJan Medala memset(&ena_stats, 0, sizeof(ena_stats)); 10901343c415SMichal Krawczyk 10911343c415SMichal Krawczyk rte_spinlock_lock(&adapter->admin_lock); 1092e3595539SStanislaw Kardach rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev, 1093e3595539SStanislaw Kardach &ena_stats); 10941343c415SMichal Krawczyk rte_spinlock_unlock(&adapter->admin_lock); 10951173fca2SJan Medala if (unlikely(rc)) { 10966f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 1097d5b0924bSMatan Azrad return rc; 10981173fca2SJan Medala } 10991173fca2SJan Medala 11001173fca2SJan Medala /* Set of basic statistics from ENA */ 11011173fca2SJan Medala stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 11021173fca2SJan Medala ena_stats.rx_pkts_low); 11031173fca2SJan Medala stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 11041173fca2SJan Medala ena_stats.tx_pkts_low); 11051173fca2SJan Medala stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 11061173fca2SJan Medala ena_stats.rx_bytes_low); 11071173fca2SJan Medala stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 11081173fca2SJan 
Medala ena_stats.tx_bytes_low); 11091173fca2SJan Medala 11101173fca2SJan Medala /* Driver related stats */ 1111e1e73e32SMichal Krawczyk stats->imissed = adapter->drv_stats->rx_drops; 11121173fca2SJan Medala stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 11131173fca2SJan Medala stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 11141173fca2SJan Medala stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 111545b6d861SMichal Krawczyk 111645b6d861SMichal Krawczyk max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 111745b6d861SMichal Krawczyk RTE_ETHDEV_QUEUE_STAT_CNTRS); 111845b6d861SMichal Krawczyk for (i = 0; i < max_rings_stats; ++i) { 111945b6d861SMichal Krawczyk struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 112045b6d861SMichal Krawczyk 112145b6d861SMichal Krawczyk stats->q_ibytes[i] = rx_stats->bytes; 112245b6d861SMichal Krawczyk stats->q_ipackets[i] = rx_stats->cnt; 112345b6d861SMichal Krawczyk stats->q_errors[i] = rx_stats->bad_desc_num + 112445b6d861SMichal Krawczyk rx_stats->bad_req_id; 112545b6d861SMichal Krawczyk } 112645b6d861SMichal Krawczyk 112745b6d861SMichal Krawczyk max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 112845b6d861SMichal Krawczyk RTE_ETHDEV_QUEUE_STAT_CNTRS); 112945b6d861SMichal Krawczyk for (i = 0; i < max_rings_stats; ++i) { 113045b6d861SMichal Krawczyk struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 113145b6d861SMichal Krawczyk 113245b6d861SMichal Krawczyk stats->q_obytes[i] = tx_stats->bytes; 113345b6d861SMichal Krawczyk stats->q_opackets[i] = tx_stats->cnt; 113445b6d861SMichal Krawczyk } 113545b6d861SMichal Krawczyk 1136d5b0924bSMatan Azrad return 0; 11371173fca2SJan Medala } 11381173fca2SJan Medala 11391173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 11401173fca2SJan Medala { 11411173fca2SJan Medala struct ena_adapter *adapter; 11421173fca2SJan Medala struct ena_com_dev *ena_dev; 11431173fca2SJan Medala int rc = 0; 11441173fca2SJan Medala 1145498c687aSRafal Kozik ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1146498c687aSRafal Kozik ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1147890728ffSStephen Hemminger adapter = dev->data->dev_private; 11481173fca2SJan Medala 11491173fca2SJan Medala ena_dev = &adapter->ena_dev; 1150498c687aSRafal Kozik ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 11511173fca2SJan Medala 11521bb4a528SFerruh Yigit if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) { 11536f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1154617898d1SMichal Krawczyk "Invalid MTU setting. 
New MTU: %d, max MTU: %d, min MTU: %d\n", 11551bb4a528SFerruh Yigit mtu, adapter->max_mtu, ENA_MIN_MTU); 1156241da076SRafal Kozik return -EINVAL; 11571173fca2SJan Medala } 11581173fca2SJan Medala 1159e3595539SStanislaw Kardach rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu); 11601173fca2SJan Medala if (rc) 11616f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 11621173fca2SJan Medala else 1163617898d1SMichal Krawczyk PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu); 11641173fca2SJan Medala 11651173fca2SJan Medala return rc; 11661173fca2SJan Medala } 11671173fca2SJan Medala 11681173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev) 11691173fca2SJan Medala { 1170890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 1171d9b8b106SMichal Krawczyk uint64_t ticks; 11721173fca2SJan Medala int rc = 0; 11731173fca2SJan Medala 117439ecdd3dSStanislaw Kardach /* Cannot allocate memory in secondary process */ 117539ecdd3dSStanislaw Kardach if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 117639ecdd3dSStanislaw Kardach PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n"); 117739ecdd3dSStanislaw Kardach return -EPERM; 117839ecdd3dSStanislaw Kardach } 117939ecdd3dSStanislaw Kardach 11801173fca2SJan Medala rc = ena_check_valid_conf(adapter); 11811173fca2SJan Medala if (rc) 11821173fca2SJan Medala return rc; 11831173fca2SJan Medala 11846986cdc4SMichal Krawczyk rc = ena_setup_rx_intr(dev); 11856986cdc4SMichal Krawczyk if (rc) 11866986cdc4SMichal Krawczyk return rc; 11876986cdc4SMichal Krawczyk 118826e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 11891173fca2SJan Medala if (rc) 11901173fca2SJan Medala return rc; 11911173fca2SJan Medala 119226e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 11931173fca2SJan Medala if (rc) 119426e5543dSRafal Kozik goto err_start_tx; 11951173fca2SJan Medala 1196295968d1SFerruh Yigit if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 119734d5e97eSMichal Krawczyk rc = ena_rss_configure(adapter); 11981173fca2SJan Medala if (rc) 119926e5543dSRafal Kozik goto err_rss_init; 12001173fca2SJan Medala } 12011173fca2SJan Medala 12021173fca2SJan Medala ena_stats_restart(dev); 12031173fca2SJan Medala 1204d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles(); 1205d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1206d9b8b106SMichal Krawczyk 1207d9b8b106SMichal Krawczyk ticks = rte_get_timer_hz(); 1208d9b8b106SMichal Krawczyk rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1209aab58857SStanislaw Kardach ena_timer_wd_callback, dev); 1210d9b8b106SMichal Krawczyk 12117830e905SSolganik Alexander ++adapter->dev_stats.dev_start; 12121173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_RUNNING; 12131173fca2SJan Medala 12141173fca2SJan Medala return 0; 121526e5543dSRafal Kozik 121626e5543dSRafal Kozik err_rss_init: 121726e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 121826e5543dSRafal Kozik err_start_tx: 121926e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 122026e5543dSRafal Kozik return rc; 12211173fca2SJan Medala } 12221173fca2SJan Medala 122362024eb8SIvan Ilchenko static int ena_stop(struct rte_eth_dev *dev) 1224eb0ef49dSMichal Krawczyk { 1225890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 1226e457bc70SRafal Kozik struct ena_com_dev *ena_dev = &adapter->ena_dev; 12276986cdc4SMichal Krawczyk struct rte_pci_device 
*pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1228d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1229e457bc70SRafal Kozik int rc; 1230eb0ef49dSMichal Krawczyk 123139ecdd3dSStanislaw Kardach /* Cannot free memory in secondary process */ 123239ecdd3dSStanislaw Kardach if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 123339ecdd3dSStanislaw Kardach PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n"); 123439ecdd3dSStanislaw Kardach return -EPERM; 123539ecdd3dSStanislaw Kardach } 123639ecdd3dSStanislaw Kardach 1237d9b8b106SMichal Krawczyk rte_timer_stop_sync(&adapter->timer_wd); 123826e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 123926e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1240d9b8b106SMichal Krawczyk 1241e457bc70SRafal Kozik if (adapter->trigger_reset) { 1242e457bc70SRafal Kozik rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1243e457bc70SRafal Kozik if (rc) 1244617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc); 1245e457bc70SRafal Kozik } 1246e457bc70SRafal Kozik 12476986cdc4SMichal Krawczyk rte_intr_disable(intr_handle); 12486986cdc4SMichal Krawczyk 12496986cdc4SMichal Krawczyk rte_intr_efd_disable(intr_handle); 1250d61138d4SHarman Kalra 1251d61138d4SHarman Kalra /* Cleanup vector list */ 1252d61138d4SHarman Kalra rte_intr_vec_list_free(intr_handle); 12536986cdc4SMichal Krawczyk 12546986cdc4SMichal Krawczyk rte_intr_enable(intr_handle); 12556986cdc4SMichal Krawczyk 12567830e905SSolganik Alexander ++adapter->dev_stats.dev_stop; 1257eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_STOPPED; 1258b8f5d2aeSThomas Monjalon dev->data->dev_started = 0; 125962024eb8SIvan Ilchenko 126062024eb8SIvan Ilchenko return 0; 1261eb0ef49dSMichal Krawczyk } 1262eb0ef49dSMichal Krawczyk 12636986cdc4SMichal Krawczyk static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) 1264df238f84SMichal Krawczyk { 12656986cdc4SMichal Krawczyk struct ena_adapter *adapter = ring->adapter; 12666986cdc4SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 12676986cdc4SMichal Krawczyk struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1268d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1269df238f84SMichal Krawczyk struct ena_com_create_io_ctx ctx = 1270df238f84SMichal Krawczyk /* policy set to _HOST just to satisfy icc compiler */ 1271df238f84SMichal Krawczyk { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1272df238f84SMichal Krawczyk 0, 0, 0, 0, 0 }; 1273df238f84SMichal Krawczyk uint16_t ena_qid; 1274778677dcSRafal Kozik unsigned int i; 1275df238f84SMichal Krawczyk int rc; 1276df238f84SMichal Krawczyk 12776986cdc4SMichal Krawczyk ctx.msix_vector = -1; 1278df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) { 1279df238f84SMichal Krawczyk ena_qid = ENA_IO_TXQ_IDX(ring->id); 1280df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1281df238f84SMichal Krawczyk ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1282778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1283778677dcSRafal Kozik ring->empty_tx_reqs[i] = i; 1284df238f84SMichal Krawczyk } else { 1285df238f84SMichal Krawczyk ena_qid = ENA_IO_RXQ_IDX(ring->id); 1286df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 12876986cdc4SMichal Krawczyk if (rte_intr_dp_is_en(intr_handle)) 1288d61138d4SHarman Kalra ctx.msix_vector = 1289d61138d4SHarman Kalra rte_intr_vec_list_index_get(intr_handle, 1290d61138d4SHarman Kalra ring->id); 
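/* Rebuild the free request id list so that every descriptor slot is
 * available again once the IO queue is (re)created.
 */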
1291d61138d4SHarman Kalra 1292778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1293778677dcSRafal Kozik ring->empty_rx_reqs[i] = i; 1294df238f84SMichal Krawczyk } 1295badc3a6aSMichal Krawczyk ctx.queue_size = ring->ring_size; 1296df238f84SMichal Krawczyk ctx.qid = ena_qid; 12974217cb0bSMichal Krawczyk ctx.numa_node = ring->numa_socket_id; 1298df238f84SMichal Krawczyk 1299df238f84SMichal Krawczyk rc = ena_com_create_io_queue(ena_dev, &ctx); 1300df238f84SMichal Krawczyk if (rc) { 13016f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1302617898d1SMichal Krawczyk "Failed to create IO queue[%d] (qid:%d), rc: %d\n", 1303df238f84SMichal Krawczyk ring->id, ena_qid, rc); 1304df238f84SMichal Krawczyk return rc; 1305df238f84SMichal Krawczyk } 1306df238f84SMichal Krawczyk 1307df238f84SMichal Krawczyk rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1308df238f84SMichal Krawczyk &ring->ena_com_io_sq, 1309df238f84SMichal Krawczyk &ring->ena_com_io_cq); 1310df238f84SMichal Krawczyk if (rc) { 13116f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1312617898d1SMichal Krawczyk "Failed to get IO queue[%d] handlers, rc: %d\n", 1313df238f84SMichal Krawczyk ring->id, rc); 1314df238f84SMichal Krawczyk ena_com_destroy_io_queue(ena_dev, ena_qid); 1315df238f84SMichal Krawczyk return rc; 1316df238f84SMichal Krawczyk } 1317df238f84SMichal Krawczyk 1318df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) 1319df238f84SMichal Krawczyk ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1320df238f84SMichal Krawczyk 13216986cdc4SMichal Krawczyk /* Start with Rx interrupts being masked. */ 13226986cdc4SMichal Krawczyk if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle)) 13236986cdc4SMichal Krawczyk ena_rx_queue_intr_disable(dev, ring->id); 13246986cdc4SMichal Krawczyk 1325df238f84SMichal Krawczyk return 0; 1326df238f84SMichal Krawczyk } 1327df238f84SMichal Krawczyk 132826e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring) 1329df238f84SMichal Krawczyk { 133026e5543dSRafal Kozik struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1331df238f84SMichal Krawczyk 133226e5543dSRafal Kozik if (ring->type == ENA_RING_TYPE_RX) { 133326e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 133426e5543dSRafal Kozik ena_rx_queue_release_bufs(ring); 133526e5543dSRafal Kozik } else { 133626e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 133726e5543dSRafal Kozik ena_tx_queue_release_bufs(ring); 1338df238f84SMichal Krawczyk } 1339df238f84SMichal Krawczyk } 1340df238f84SMichal Krawczyk 134126e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev, 134226e5543dSRafal Kozik enum ena_ring_type ring_type) 134326e5543dSRafal Kozik { 1344890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 134526e5543dSRafal Kozik struct ena_ring *queues = NULL; 134626e5543dSRafal Kozik uint16_t nb_queues, i; 134726e5543dSRafal Kozik 134826e5543dSRafal Kozik if (ring_type == ENA_RING_TYPE_RX) { 134926e5543dSRafal Kozik queues = adapter->rx_ring; 135026e5543dSRafal Kozik nb_queues = dev->data->nb_rx_queues; 135126e5543dSRafal Kozik } else { 135226e5543dSRafal Kozik queues = adapter->tx_ring; 135326e5543dSRafal Kozik nb_queues = dev->data->nb_tx_queues; 135426e5543dSRafal Kozik } 135526e5543dSRafal Kozik 135626e5543dSRafal Kozik for (i = 0; i < nb_queues; ++i) 135726e5543dSRafal Kozik if (queues[i].configured) 135826e5543dSRafal Kozik ena_queue_stop(&queues[i]); 135926e5543dSRafal Kozik } 136026e5543dSRafal Kozik 
13616986cdc4SMichal Krawczyk static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring) 13621173fca2SJan Medala { 1363a467e8f3SMichal Krawczyk int rc, bufs_num; 13641173fca2SJan Medala 13651173fca2SJan Medala ena_assert_msg(ring->configured == 1, 136626e5543dSRafal Kozik "Trying to start unconfigured queue\n"); 13671173fca2SJan Medala 13686986cdc4SMichal Krawczyk rc = ena_create_io_queue(dev, ring); 1369df238f84SMichal Krawczyk if (rc) { 1370617898d1SMichal Krawczyk PMD_INIT_LOG(ERR, "Failed to create IO queue\n"); 1371df238f84SMichal Krawczyk return rc; 1372df238f84SMichal Krawczyk } 1373df238f84SMichal Krawczyk 13741173fca2SJan Medala ring->next_to_clean = 0; 13751173fca2SJan Medala ring->next_to_use = 0; 13761173fca2SJan Medala 13777830e905SSolganik Alexander if (ring->type == ENA_RING_TYPE_TX) { 13787830e905SSolganik Alexander ring->tx_stats.available_desc = 1379b2b02edeSMichal Krawczyk ena_com_free_q_entries(ring->ena_com_io_sq); 13801173fca2SJan Medala return 0; 13817830e905SSolganik Alexander } 13821173fca2SJan Medala 1383a467e8f3SMichal Krawczyk bufs_num = ring->ring_size - 1; 1384a467e8f3SMichal Krawczyk rc = ena_populate_rx_queue(ring, bufs_num); 1385a467e8f3SMichal Krawczyk if (rc != bufs_num) { 138626e5543dSRafal Kozik ena_com_destroy_io_queue(&ring->adapter->ena_dev, 138726e5543dSRafal Kozik ENA_IO_RXQ_IDX(ring->id)); 1388617898d1SMichal Krawczyk PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n"); 1389241da076SRafal Kozik return ENA_COM_FAULT; 13901173fca2SJan Medala } 13914387e81cSIdo Segev /* Flush per-core RX buffers pools cache as they can be used on other 13924387e81cSIdo Segev * cores as well. 13934387e81cSIdo Segev */ 13944387e81cSIdo Segev rte_mempool_cache_flush(NULL, ring->mb_pool); 13951173fca2SJan Medala 13961173fca2SJan Medala return 0; 13971173fca2SJan Medala } 13981173fca2SJan Medala 13991173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, 14001173fca2SJan Medala uint16_t queue_idx, 14011173fca2SJan Medala uint16_t nb_desc, 14024217cb0bSMichal Krawczyk unsigned int socket_id, 140356b8b9b7SRafal Kozik const struct rte_eth_txconf *tx_conf) 14041173fca2SJan Medala { 14051173fca2SJan Medala struct ena_ring *txq = NULL; 1406890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 14071173fca2SJan Medala unsigned int i; 1408005064e5SMichal Krawczyk uint16_t dyn_thresh; 14091173fca2SJan Medala 14101173fca2SJan Medala txq = &adapter->tx_ring[queue_idx]; 14111173fca2SJan Medala 14121173fca2SJan Medala if (txq->configured) { 14136f1c9df9SStephen Hemminger PMD_DRV_LOG(CRIT, 1414617898d1SMichal Krawczyk "API violation. 
Queue[%d] is already configured\n", 14151173fca2SJan Medala queue_idx); 1416241da076SRafal Kozik return ENA_COM_FAULT; 14171173fca2SJan Medala } 14181173fca2SJan Medala 14191daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 14206f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1421617898d1SMichal Krawczyk "Unsupported size of Tx queue: %d is not a power of 2.\n", 14221daff526SJakub Palider nb_desc); 14231daff526SJakub Palider return -EINVAL; 14241daff526SJakub Palider } 14251daff526SJakub Palider 14265920d930SMichal Krawczyk if (nb_desc > adapter->max_tx_ring_size) { 14276f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1428617898d1SMichal Krawczyk "Unsupported size of Tx queue (max size: %d)\n", 14295920d930SMichal Krawczyk adapter->max_tx_ring_size); 14301173fca2SJan Medala return -EINVAL; 14311173fca2SJan Medala } 14321173fca2SJan Medala 14331173fca2SJan Medala txq->port_id = dev->data->port_id; 14341173fca2SJan Medala txq->next_to_clean = 0; 14351173fca2SJan Medala txq->next_to_use = 0; 14361173fca2SJan Medala txq->ring_size = nb_desc; 1437c0006061SMichal Krawczyk txq->size_mask = nb_desc - 1; 14384217cb0bSMichal Krawczyk txq->numa_socket_id = socket_id; 14391d973d8fSIgor Chauskin txq->pkts_without_db = false; 1440f93e20e5SMichal Krawczyk txq->last_cleanup_ticks = 0; 14411173fca2SJan Medala 144208180833SMichal Krawczyk txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info", 144308180833SMichal Krawczyk sizeof(struct ena_tx_buffer) * txq->ring_size, 144408180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 144508180833SMichal Krawczyk socket_id); 14461173fca2SJan Medala if (!txq->tx_buffer_info) { 1447617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1448617898d1SMichal Krawczyk "Failed to allocate memory for Tx buffer info\n"); 1449df238f84SMichal Krawczyk return -ENOMEM; 14501173fca2SJan Medala } 14511173fca2SJan Medala 145208180833SMichal Krawczyk txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs", 145308180833SMichal Krawczyk sizeof(uint16_t) * txq->ring_size, 145408180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 145508180833SMichal Krawczyk socket_id); 14561173fca2SJan Medala if (!txq->empty_tx_reqs) { 1457617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1458617898d1SMichal Krawczyk "Failed to allocate memory for empty Tx requests\n"); 1459df238f84SMichal Krawczyk rte_free(txq->tx_buffer_info); 1460df238f84SMichal Krawczyk return -ENOMEM; 14611173fca2SJan Medala } 1462241da076SRafal Kozik 14632fca2a98SMichal Krawczyk txq->push_buf_intermediate_buf = 146408180833SMichal Krawczyk rte_zmalloc_socket("txq->push_buf_intermediate_buf", 14652fca2a98SMichal Krawczyk txq->tx_max_header_size, 146608180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 146708180833SMichal Krawczyk socket_id); 14682fca2a98SMichal Krawczyk if (!txq->push_buf_intermediate_buf) { 1469617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n"); 14702fca2a98SMichal Krawczyk rte_free(txq->tx_buffer_info); 14712fca2a98SMichal Krawczyk rte_free(txq->empty_tx_reqs); 14722fca2a98SMichal Krawczyk return -ENOMEM; 14732fca2a98SMichal Krawczyk } 14742fca2a98SMichal Krawczyk 14751173fca2SJan Medala for (i = 0; i < txq->ring_size; i++) 14761173fca2SJan Medala txq->empty_tx_reqs[i] = i; 14771173fca2SJan Medala 1478005064e5SMichal Krawczyk txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1479005064e5SMichal Krawczyk 1480005064e5SMichal Krawczyk /* Check if caller provided the Tx cleanup threshold value. 
*/ 1481005064e5SMichal Krawczyk if (tx_conf->tx_free_thresh != 0) { 1482005064e5SMichal Krawczyk txq->tx_free_thresh = tx_conf->tx_free_thresh; 1483005064e5SMichal Krawczyk } else { 1484005064e5SMichal Krawczyk dyn_thresh = txq->ring_size - 1485005064e5SMichal Krawczyk txq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1486005064e5SMichal Krawczyk txq->tx_free_thresh = RTE_MAX(dyn_thresh, 1487005064e5SMichal Krawczyk txq->ring_size - ENA_REFILL_THRESH_PACKET); 14882081d5e2SMichal Krawczyk } 1489005064e5SMichal Krawczyk 1490f93e20e5SMichal Krawczyk txq->missing_tx_completion_threshold = 1491f93e20e5SMichal Krawczyk RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP); 1492f93e20e5SMichal Krawczyk 14931173fca2SJan Medala /* Store pointer to this queue in upper layer */ 14941173fca2SJan Medala txq->configured = 1; 14951173fca2SJan Medala dev->data->tx_queues[queue_idx] = txq; 1496241da076SRafal Kozik 1497241da076SRafal Kozik return 0; 14981173fca2SJan Medala } 14991173fca2SJan Medala 15001173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, 15011173fca2SJan Medala uint16_t queue_idx, 15021173fca2SJan Medala uint16_t nb_desc, 15034217cb0bSMichal Krawczyk unsigned int socket_id, 150434d5e97eSMichal Krawczyk const struct rte_eth_rxconf *rx_conf, 15051173fca2SJan Medala struct rte_mempool *mp) 15061173fca2SJan Medala { 1507890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 15081173fca2SJan Medala struct ena_ring *rxq = NULL; 150938364c26SMichal Krawczyk size_t buffer_size; 1510df238f84SMichal Krawczyk int i; 1511005064e5SMichal Krawczyk uint16_t dyn_thresh; 15121173fca2SJan Medala 15131173fca2SJan Medala rxq = &adapter->rx_ring[queue_idx]; 15141173fca2SJan Medala if (rxq->configured) { 15156f1c9df9SStephen Hemminger PMD_DRV_LOG(CRIT, 1516617898d1SMichal Krawczyk "API violation. 
Queue[%d] is already configured\n", 15171173fca2SJan Medala queue_idx); 1518241da076SRafal Kozik return ENA_COM_FAULT; 15191173fca2SJan Medala } 15201173fca2SJan Medala 15211daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 15226f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1523617898d1SMichal Krawczyk "Unsupported size of Rx queue: %d is not a power of 2.\n", 15241daff526SJakub Palider nb_desc); 15251daff526SJakub Palider return -EINVAL; 15261daff526SJakub Palider } 15271daff526SJakub Palider 15285920d930SMichal Krawczyk if (nb_desc > adapter->max_rx_ring_size) { 15296f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1530617898d1SMichal Krawczyk "Unsupported size of Rx queue (max size: %d)\n", 15315920d930SMichal Krawczyk adapter->max_rx_ring_size); 15321173fca2SJan Medala return -EINVAL; 15331173fca2SJan Medala } 15341173fca2SJan Medala 153538364c26SMichal Krawczyk /* ENA isn't supporting buffers smaller than 1400 bytes */ 153638364c26SMichal Krawczyk buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 153738364c26SMichal Krawczyk if (buffer_size < ENA_RX_BUF_MIN_SIZE) { 153838364c26SMichal Krawczyk PMD_DRV_LOG(ERR, 1539617898d1SMichal Krawczyk "Unsupported size of Rx buffer: %zu (min size: %d)\n", 154038364c26SMichal Krawczyk buffer_size, ENA_RX_BUF_MIN_SIZE); 154138364c26SMichal Krawczyk return -EINVAL; 154238364c26SMichal Krawczyk } 154338364c26SMichal Krawczyk 15441173fca2SJan Medala rxq->port_id = dev->data->port_id; 15451173fca2SJan Medala rxq->next_to_clean = 0; 15461173fca2SJan Medala rxq->next_to_use = 0; 15471173fca2SJan Medala rxq->ring_size = nb_desc; 1548c0006061SMichal Krawczyk rxq->size_mask = nb_desc - 1; 15494217cb0bSMichal Krawczyk rxq->numa_socket_id = socket_id; 15501173fca2SJan Medala rxq->mb_pool = mp; 15511173fca2SJan Medala 155208180833SMichal Krawczyk rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info", 15531be097dcSMichal Krawczyk sizeof(struct ena_rx_buffer) * nb_desc, 155408180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 155508180833SMichal Krawczyk socket_id); 15561173fca2SJan Medala if (!rxq->rx_buffer_info) { 1557617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1558617898d1SMichal Krawczyk "Failed to allocate memory for Rx buffer info\n"); 15591173fca2SJan Medala return -ENOMEM; 15601173fca2SJan Medala } 15611173fca2SJan Medala 156208180833SMichal Krawczyk rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer", 156379405ee1SRafal Kozik sizeof(struct rte_mbuf *) * nb_desc, 156408180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 156508180833SMichal Krawczyk socket_id); 156679405ee1SRafal Kozik if (!rxq->rx_refill_buffer) { 1567617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1568617898d1SMichal Krawczyk "Failed to allocate memory for Rx refill buffer\n"); 156979405ee1SRafal Kozik rte_free(rxq->rx_buffer_info); 157079405ee1SRafal Kozik rxq->rx_buffer_info = NULL; 157179405ee1SRafal Kozik return -ENOMEM; 157279405ee1SRafal Kozik } 157379405ee1SRafal Kozik 157408180833SMichal Krawczyk rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs", 1575c2034976SMichal Krawczyk sizeof(uint16_t) * nb_desc, 157608180833SMichal Krawczyk RTE_CACHE_LINE_SIZE, 157708180833SMichal Krawczyk socket_id); 1578c2034976SMichal Krawczyk if (!rxq->empty_rx_reqs) { 1579617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1580617898d1SMichal Krawczyk "Failed to allocate memory for empty Rx requests\n"); 1581c2034976SMichal Krawczyk rte_free(rxq->rx_buffer_info); 1582c2034976SMichal Krawczyk rxq->rx_buffer_info = NULL; 158379405ee1SRafal Kozik rte_free(rxq->rx_refill_buffer); 
158479405ee1SRafal Kozik rxq->rx_refill_buffer = NULL; 1585c2034976SMichal Krawczyk return -ENOMEM; 1586c2034976SMichal Krawczyk } 1587c2034976SMichal Krawczyk 1588c2034976SMichal Krawczyk for (i = 0; i < nb_desc; i++) 1589eccbe2ffSRafal Kozik rxq->empty_rx_reqs[i] = i; 1590c2034976SMichal Krawczyk 159134d5e97eSMichal Krawczyk rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 159234d5e97eSMichal Krawczyk 1593005064e5SMichal Krawczyk if (rx_conf->rx_free_thresh != 0) { 1594005064e5SMichal Krawczyk rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1595005064e5SMichal Krawczyk } else { 1596005064e5SMichal Krawczyk dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1597005064e5SMichal Krawczyk rxq->rx_free_thresh = RTE_MIN(dyn_thresh, 1598005064e5SMichal Krawczyk (uint16_t)(ENA_REFILL_THRESH_PACKET)); 1599005064e5SMichal Krawczyk } 1600005064e5SMichal Krawczyk 16011173fca2SJan Medala /* Store pointer to this queue in upper layer */ 16021173fca2SJan Medala rxq->configured = 1; 16031173fca2SJan Medala dev->data->rx_queues[queue_idx] = rxq; 16041173fca2SJan Medala 1605df238f84SMichal Krawczyk return 0; 16061173fca2SJan Medala } 16071173fca2SJan Medala 160883fd97b2SMichal Krawczyk static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 160983fd97b2SMichal Krawczyk struct rte_mbuf *mbuf, uint16_t id) 161083fd97b2SMichal Krawczyk { 161183fd97b2SMichal Krawczyk struct ena_com_buf ebuf; 161283fd97b2SMichal Krawczyk int rc; 161383fd97b2SMichal Krawczyk 161483fd97b2SMichal Krawczyk /* prepare physical address for DMA transaction */ 161583fd97b2SMichal Krawczyk ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 161683fd97b2SMichal Krawczyk ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 161783fd97b2SMichal Krawczyk 161883fd97b2SMichal Krawczyk /* pass resource to device */ 161983fd97b2SMichal Krawczyk rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); 162083fd97b2SMichal Krawczyk if (unlikely(rc != 0)) 16210a001d69SMichal Krawczyk PMD_RX_LOG(WARNING, "Failed adding Rx desc\n"); 162283fd97b2SMichal Krawczyk 162383fd97b2SMichal Krawczyk return rc; 162483fd97b2SMichal Krawczyk } 162583fd97b2SMichal Krawczyk 16261173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 16271173fca2SJan Medala { 16281173fca2SJan Medala unsigned int i; 16291173fca2SJan Medala int rc; 16301daff526SJakub Palider uint16_t next_to_use = rxq->next_to_use; 16310a001d69SMichal Krawczyk uint16_t req_id; 16320a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX 16330a001d69SMichal Krawczyk uint16_t in_use; 16340a001d69SMichal Krawczyk #endif 163579405ee1SRafal Kozik struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 16361173fca2SJan Medala 16371173fca2SJan Medala if (unlikely(!count)) 16381173fca2SJan Medala return 0; 16391173fca2SJan Medala 16400a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX 1641c0006061SMichal Krawczyk in_use = rxq->ring_size - 1 - 1642c0006061SMichal Krawczyk ena_com_free_q_entries(rxq->ena_com_io_sq); 16430a001d69SMichal Krawczyk if (unlikely((in_use + count) >= rxq->ring_size)) 16440a001d69SMichal Krawczyk PMD_RX_LOG(ERR, "Bad Rx ring state\n"); 16450a001d69SMichal Krawczyk #endif 16461173fca2SJan Medala 16471173fca2SJan Medala /* get resources for incoming packets */ 16483c8bc29fSDavid Harton rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count); 16491173fca2SJan Medala if (unlikely(rc < 0)) { 16501173fca2SJan Medala rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 16517830e905SSolganik Alexander ++rxq->rx_stats.mbuf_alloc_fail; 
1652617898d1SMichal Krawczyk PMD_RX_LOG(DEBUG, "There are not enough free buffers\n"); 16531173fca2SJan Medala return 0; 16541173fca2SJan Medala } 16551173fca2SJan Medala 16561173fca2SJan Medala for (i = 0; i < count; i++) { 165779405ee1SRafal Kozik struct rte_mbuf *mbuf = mbufs[i]; 16581be097dcSMichal Krawczyk struct ena_rx_buffer *rx_info; 16591173fca2SJan Medala 166079405ee1SRafal Kozik if (likely((i + 4) < count)) 166179405ee1SRafal Kozik rte_prefetch0(mbufs[i + 4]); 1662c2034976SMichal Krawczyk 1663c0006061SMichal Krawczyk req_id = rxq->empty_rx_reqs[next_to_use]; 16641be097dcSMichal Krawczyk rx_info = &rxq->rx_buffer_info[req_id]; 1665241da076SRafal Kozik 166683fd97b2SMichal Krawczyk rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); 166783fd97b2SMichal Krawczyk if (unlikely(rc != 0)) 16681173fca2SJan Medala break; 166983fd97b2SMichal Krawczyk 16701be097dcSMichal Krawczyk rx_info->mbuf = mbuf; 1671c0006061SMichal Krawczyk next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); 16721173fca2SJan Medala } 16731173fca2SJan Medala 167479405ee1SRafal Kozik if (unlikely(i < count)) { 16750a001d69SMichal Krawczyk PMD_RX_LOG(WARNING, 1676617898d1SMichal Krawczyk "Refilled Rx queue[%d] with only %d/%d buffers\n", 1677617898d1SMichal Krawczyk rxq->id, i, count); 16783c8bc29fSDavid Harton rte_pktmbuf_free_bulk(&mbufs[i], count - i); 16797830e905SSolganik Alexander ++rxq->rx_stats.refill_partial; 168079405ee1SRafal Kozik } 1681241da076SRafal Kozik 16827be78d02SJosh Soref /* When we submitted free resources to device... */ 16833d19e1abSRafal Kozik if (likely(i > 0)) { 168438faa87eSMichal Krawczyk /* ...let HW know that it can fill buffers with data. */ 16851173fca2SJan Medala ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 16861173fca2SJan Medala 16875e02e19eSJan Medala rxq->next_to_use = next_to_use; 16885e02e19eSJan Medala } 16895e02e19eSJan Medala 16901173fca2SJan Medala return i; 16911173fca2SJan Medala } 16921173fca2SJan Medala 1693b9b05d6fSMichal Krawczyk static int ena_device_init(struct ena_adapter *adapter, 1694aab58857SStanislaw Kardach struct rte_pci_device *pdev, 1695b9b05d6fSMichal Krawczyk struct ena_com_dev_get_features_ctx *get_feat_ctx) 16961173fca2SJan Medala { 1697b9b05d6fSMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 1698ca148440SMichal Krawczyk uint32_t aenq_groups; 16991173fca2SJan Medala int rc; 1700c4144557SJan Medala bool readless_supported; 17011173fca2SJan Medala 17021173fca2SJan Medala /* Initialize mmio registers */ 17031173fca2SJan Medala rc = ena_com_mmio_reg_read_request_init(ena_dev); 17041173fca2SJan Medala if (rc) { 1705617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n"); 17061173fca2SJan Medala return rc; 17071173fca2SJan Medala } 17081173fca2SJan Medala 1709c4144557SJan Medala /* The PCIe configuration space revision id indicate if mmio reg 1710c4144557SJan Medala * read is disabled. 
1711c4144557SJan Medala */ 1712aab58857SStanislaw Kardach readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ); 1713c4144557SJan Medala ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1714c4144557SJan Medala 17151173fca2SJan Medala /* reset device */ 17163adcba9aSMichal Krawczyk rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 17171173fca2SJan Medala if (rc) { 1718617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Cannot reset device\n"); 17191173fca2SJan Medala goto err_mmio_read_less; 17201173fca2SJan Medala } 17211173fca2SJan Medala 17221173fca2SJan Medala /* check FW version */ 17231173fca2SJan Medala rc = ena_com_validate_version(ena_dev); 17241173fca2SJan Medala if (rc) { 1725617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Device version is too low\n"); 17261173fca2SJan Medala goto err_mmio_read_less; 17271173fca2SJan Medala } 17281173fca2SJan Medala 17291173fca2SJan Medala ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 17301173fca2SJan Medala 17311173fca2SJan Medala /* ENA device administration layer init */ 1732b68309beSRafal Kozik rc = ena_com_admin_init(ena_dev, &aenq_handlers); 17331173fca2SJan Medala if (rc) { 17346f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1735617898d1SMichal Krawczyk "Cannot initialize ENA admin queue\n"); 17361173fca2SJan Medala goto err_mmio_read_less; 17371173fca2SJan Medala } 17381173fca2SJan Medala 17391173fca2SJan Medala /* To enable the msix interrupts the driver needs to know the number 17401173fca2SJan Medala * of queues. So the driver uses polling mode to retrieve this 17411173fca2SJan Medala * information. 17421173fca2SJan Medala */ 17431173fca2SJan Medala ena_com_set_admin_polling_mode(ena_dev, true); 17441173fca2SJan Medala 1745201ff2e5SJakub Palider ena_config_host_info(ena_dev); 1746201ff2e5SJakub Palider 17471173fca2SJan Medala /* Get Device Attributes and features */ 17481173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 17491173fca2SJan Medala if (rc) { 17506f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, 1751617898d1SMichal Krawczyk "Cannot get attribute for ENA device, rc: %d\n", rc); 17521173fca2SJan Medala goto err_admin_init; 17531173fca2SJan Medala } 17541173fca2SJan Medala 1755f01f060cSRafal Kozik aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1756d9b8b106SMichal Krawczyk BIT(ENA_ADMIN_NOTIFICATION) | 1757983cce2dSRafal Kozik BIT(ENA_ADMIN_KEEP_ALIVE) | 1758983cce2dSRafal Kozik BIT(ENA_ADMIN_FATAL_ERROR) | 1759983cce2dSRafal Kozik BIT(ENA_ADMIN_WARNING); 1760ca148440SMichal Krawczyk 1761ca148440SMichal Krawczyk aenq_groups &= get_feat_ctx->aenq.supported_groups; 1762ca148440SMichal Krawczyk 1763b9b05d6fSMichal Krawczyk adapter->all_aenq_groups = aenq_groups; 1764e859d2b8SRafal Kozik 17651173fca2SJan Medala return 0; 17661173fca2SJan Medala 17671173fca2SJan Medala err_admin_init: 17681173fca2SJan Medala ena_com_admin_destroy(ena_dev); 17691173fca2SJan Medala 17701173fca2SJan Medala err_mmio_read_less: 17711173fca2SJan Medala ena_com_mmio_reg_read_request_destroy(ena_dev); 17721173fca2SJan Medala 17731173fca2SJan Medala return rc; 17741173fca2SJan Medala } 17751173fca2SJan Medala 1776ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg) 177715773e06SMichal Krawczyk { 1778aab58857SStanislaw Kardach struct rte_eth_dev *dev = cb_arg; 1779aab58857SStanislaw Kardach struct ena_adapter *adapter = dev->data->dev_private; 178015773e06SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 178115773e06SMichal Krawczyk 178215773e06SMichal Krawczyk 
ena_com_admin_q_comp_intr_handler(ena_dev); 17833d19e1abSRafal Kozik if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1784aab58857SStanislaw Kardach ena_com_aenq_intr_handler(ena_dev, dev); 178515773e06SMichal Krawczyk } 178615773e06SMichal Krawczyk 17875efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter) 17885efb9fc7SMichal Krawczyk { 1789b9b05d6fSMichal Krawczyk if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE))) 1790e859d2b8SRafal Kozik return; 1791e859d2b8SRafal Kozik 17925efb9fc7SMichal Krawczyk if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 17935efb9fc7SMichal Krawczyk return; 17945efb9fc7SMichal Krawczyk 17955efb9fc7SMichal Krawczyk if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 17965efb9fc7SMichal Krawczyk adapter->keep_alive_timeout)) { 17976f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 17982bae75eaSDawid Gorecki ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 17997830e905SSolganik Alexander ++adapter->dev_stats.wd_expired; 18005efb9fc7SMichal Krawczyk } 18015efb9fc7SMichal Krawczyk } 18025efb9fc7SMichal Krawczyk 18035efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */ 18045efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter) 18055efb9fc7SMichal Krawczyk { 18065efb9fc7SMichal Krawczyk if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1807617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); 18082bae75eaSDawid Gorecki ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 18095efb9fc7SMichal Krawczyk } 18105efb9fc7SMichal Krawczyk } 18115efb9fc7SMichal Krawczyk 1812f93e20e5SMichal Krawczyk static int check_for_tx_completion_in_queue(struct ena_adapter *adapter, 1813f93e20e5SMichal Krawczyk struct ena_ring *tx_ring) 1814f93e20e5SMichal Krawczyk { 1815f93e20e5SMichal Krawczyk struct ena_tx_buffer *tx_buf; 1816f93e20e5SMichal Krawczyk uint64_t timestamp; 1817f93e20e5SMichal Krawczyk uint64_t completion_delay; 1818f93e20e5SMichal Krawczyk uint32_t missed_tx = 0; 1819f93e20e5SMichal Krawczyk unsigned int i; 1820f93e20e5SMichal Krawczyk int rc = 0; 1821f93e20e5SMichal Krawczyk 1822f93e20e5SMichal Krawczyk for (i = 0; i < tx_ring->ring_size; ++i) { 1823f93e20e5SMichal Krawczyk tx_buf = &tx_ring->tx_buffer_info[i]; 1824f93e20e5SMichal Krawczyk timestamp = tx_buf->timestamp; 1825f93e20e5SMichal Krawczyk 1826f93e20e5SMichal Krawczyk if (timestamp == 0) 1827f93e20e5SMichal Krawczyk continue; 1828f93e20e5SMichal Krawczyk 1829f93e20e5SMichal Krawczyk completion_delay = rte_get_timer_cycles() - timestamp; 1830f93e20e5SMichal Krawczyk if (completion_delay > adapter->missing_tx_completion_to) { 1831f93e20e5SMichal Krawczyk if (unlikely(!tx_buf->print_once)) { 1832f93e20e5SMichal Krawczyk PMD_TX_LOG(WARNING, 1833f93e20e5SMichal Krawczyk "Found a Tx that wasn't completed on time, qid %d, index %d. 
" 1834f93e20e5SMichal Krawczyk "Missing Tx outstanding for %" PRIu64 " msecs.\n", 1835f93e20e5SMichal Krawczyk tx_ring->id, i, completion_delay / 1836f93e20e5SMichal Krawczyk rte_get_timer_hz() * 1000); 1837f93e20e5SMichal Krawczyk tx_buf->print_once = true; 1838f93e20e5SMichal Krawczyk } 1839f93e20e5SMichal Krawczyk ++missed_tx; 1840f93e20e5SMichal Krawczyk } 1841f93e20e5SMichal Krawczyk } 1842f93e20e5SMichal Krawczyk 1843f93e20e5SMichal Krawczyk if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) { 1844f93e20e5SMichal Krawczyk PMD_DRV_LOG(ERR, 1845f93e20e5SMichal Krawczyk "The number of lost Tx completions is above the threshold (%d > %d). " 1846f93e20e5SMichal Krawczyk "Trigger the device reset.\n", 1847f93e20e5SMichal Krawczyk missed_tx, 1848f93e20e5SMichal Krawczyk tx_ring->missing_tx_completion_threshold); 1849f93e20e5SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 1850f93e20e5SMichal Krawczyk adapter->trigger_reset = true; 1851f93e20e5SMichal Krawczyk rc = -EIO; 1852f93e20e5SMichal Krawczyk } 1853f93e20e5SMichal Krawczyk 1854f93e20e5SMichal Krawczyk tx_ring->tx_stats.missed_tx += missed_tx; 1855f93e20e5SMichal Krawczyk 1856f93e20e5SMichal Krawczyk return rc; 1857f93e20e5SMichal Krawczyk } 1858f93e20e5SMichal Krawczyk 1859f93e20e5SMichal Krawczyk static void check_for_tx_completions(struct ena_adapter *adapter) 1860f93e20e5SMichal Krawczyk { 1861f93e20e5SMichal Krawczyk struct ena_ring *tx_ring; 1862f93e20e5SMichal Krawczyk uint64_t tx_cleanup_delay; 1863f93e20e5SMichal Krawczyk size_t qid; 1864f93e20e5SMichal Krawczyk int budget; 1865f93e20e5SMichal Krawczyk uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues; 1866f93e20e5SMichal Krawczyk 1867f93e20e5SMichal Krawczyk if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 1868f93e20e5SMichal Krawczyk return; 1869f93e20e5SMichal Krawczyk 1870f93e20e5SMichal Krawczyk nb_tx_queues = adapter->edev_data->nb_tx_queues; 1871f93e20e5SMichal Krawczyk budget = adapter->missing_tx_completion_budget; 1872f93e20e5SMichal Krawczyk 1873f93e20e5SMichal Krawczyk qid = adapter->last_tx_comp_qid; 1874f93e20e5SMichal Krawczyk while (budget-- > 0) { 1875f93e20e5SMichal Krawczyk tx_ring = &adapter->tx_ring[qid]; 1876f93e20e5SMichal Krawczyk 1877f93e20e5SMichal Krawczyk /* Tx cleanup is called only by the burst function and can be 1878f93e20e5SMichal Krawczyk * called dynamically by the application. Also cleanup is 1879f93e20e5SMichal Krawczyk * limited by the threshold. To avoid false detection of the 1880f93e20e5SMichal Krawczyk * missing HW Tx completion, get the delay since last cleanup 1881f93e20e5SMichal Krawczyk * function was called. 
1882f93e20e5SMichal Krawczyk */ 1883f93e20e5SMichal Krawczyk tx_cleanup_delay = rte_get_timer_cycles() - 1884f93e20e5SMichal Krawczyk tx_ring->last_cleanup_ticks; 1885f93e20e5SMichal Krawczyk if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay) 1886f93e20e5SMichal Krawczyk check_for_tx_completion_in_queue(adapter, tx_ring); 1887f93e20e5SMichal Krawczyk qid = (qid + 1) % nb_tx_queues; 1888f93e20e5SMichal Krawczyk } 1889f93e20e5SMichal Krawczyk 1890f93e20e5SMichal Krawczyk adapter->last_tx_comp_qid = qid; 1891f93e20e5SMichal Krawczyk } 1892f93e20e5SMichal Krawczyk 1893d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1894d9b8b106SMichal Krawczyk void *arg) 1895d9b8b106SMichal Krawczyk { 1896aab58857SStanislaw Kardach struct rte_eth_dev *dev = arg; 1897aab58857SStanislaw Kardach struct ena_adapter *adapter = dev->data->dev_private; 1898d9b8b106SMichal Krawczyk 1899e2174a54SMichal Krawczyk if (unlikely(adapter->trigger_reset)) 1900e2174a54SMichal Krawczyk return; 1901e2174a54SMichal Krawczyk 19025efb9fc7SMichal Krawczyk check_for_missing_keep_alive(adapter); 19035efb9fc7SMichal Krawczyk check_for_admin_com_state(adapter); 1904f93e20e5SMichal Krawczyk check_for_tx_completions(adapter); 1905d9b8b106SMichal Krawczyk 19065efb9fc7SMichal Krawczyk if (unlikely(adapter->trigger_reset)) { 19076f1c9df9SStephen Hemminger PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 19085723fbedSFerruh Yigit rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1909d9b8b106SMichal Krawczyk NULL); 1910d9b8b106SMichal Krawczyk } 1911d9b8b106SMichal Krawczyk } 1912d9b8b106SMichal Krawczyk 19132fca2a98SMichal Krawczyk static inline void 19148a7a73f2SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config, 19158a7a73f2SMichal Krawczyk struct ena_admin_feature_llq_desc *llq, 19168a7a73f2SMichal Krawczyk bool use_large_llq_hdr) 19172fca2a98SMichal Krawczyk { 19182fca2a98SMichal Krawczyk llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 19192fca2a98SMichal Krawczyk llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 19202fca2a98SMichal Krawczyk llq_config->llq_num_decs_before_header = 19212fca2a98SMichal Krawczyk ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 19228a7a73f2SMichal Krawczyk 19238a7a73f2SMichal Krawczyk if (use_large_llq_hdr && 19248a7a73f2SMichal Krawczyk (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 19258a7a73f2SMichal Krawczyk llq_config->llq_ring_entry_size = 19268a7a73f2SMichal Krawczyk ENA_ADMIN_LIST_ENTRY_SIZE_256B; 19278a7a73f2SMichal Krawczyk llq_config->llq_ring_entry_size_value = 256; 19288a7a73f2SMichal Krawczyk } else { 19298a7a73f2SMichal Krawczyk llq_config->llq_ring_entry_size = 19308a7a73f2SMichal Krawczyk ENA_ADMIN_LIST_ENTRY_SIZE_128B; 19312fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size_value = 128; 19322fca2a98SMichal Krawczyk } 19338a7a73f2SMichal Krawczyk } 19342fca2a98SMichal Krawczyk 19352fca2a98SMichal Krawczyk static int 19362fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter, 19372fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev, 19382fca2a98SMichal Krawczyk struct ena_admin_feature_llq_desc *llq, 19392fca2a98SMichal Krawczyk struct ena_llq_configurations *llq_default_configurations) 19402fca2a98SMichal Krawczyk { 19412fca2a98SMichal Krawczyk int rc; 19422fca2a98SMichal Krawczyk u32 llq_feature_mask; 19432fca2a98SMichal Krawczyk 19442fca2a98SMichal Krawczyk llq_feature_mask = 1 << ENA_ADMIN_LLQ; 19452fca2a98SMichal 
Krawczyk if (!(ena_dev->supported_features & llq_feature_mask)) { 19466f1c9df9SStephen Hemminger PMD_DRV_LOG(INFO, 19472fca2a98SMichal Krawczyk "LLQ is not supported. Fallback to host mode policy.\n"); 19482fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 19492fca2a98SMichal Krawczyk return 0; 19502fca2a98SMichal Krawczyk } 19512fca2a98SMichal Krawczyk 19522fca2a98SMichal Krawczyk rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 19532fca2a98SMichal Krawczyk if (unlikely(rc)) { 1954617898d1SMichal Krawczyk PMD_INIT_LOG(WARNING, 1955617898d1SMichal Krawczyk "Failed to config dev mode. Fallback to host mode policy.\n"); 19562fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 19572fca2a98SMichal Krawczyk return 0; 19582fca2a98SMichal Krawczyk } 19592fca2a98SMichal Krawczyk 19602fca2a98SMichal Krawczyk /* Nothing to config, exit */ 19612fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 19622fca2a98SMichal Krawczyk return 0; 19632fca2a98SMichal Krawczyk 19642fca2a98SMichal Krawczyk if (!adapter->dev_mem_base) { 1965617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 1966617898d1SMichal Krawczyk "Unable to access LLQ BAR resource. Fallback to host mode policy.\n"); 19672fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 19682fca2a98SMichal Krawczyk return 0; 19692fca2a98SMichal Krawczyk } 19702fca2a98SMichal Krawczyk 19712fca2a98SMichal Krawczyk ena_dev->mem_bar = adapter->dev_mem_base; 19722fca2a98SMichal Krawczyk 19732fca2a98SMichal Krawczyk return 0; 19742fca2a98SMichal Krawczyk } 19752fca2a98SMichal Krawczyk 19765920d930SMichal Krawczyk static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev, 197701bd6877SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx) 197801bd6877SRafal Kozik { 19795920d930SMichal Krawczyk uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 198001bd6877SRafal Kozik 1981ea93d37eSRafal Kozik /* Regular queues capabilities */ 1982ea93d37eSRafal Kozik if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1983ea93d37eSRafal Kozik struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1984ea93d37eSRafal Kozik &get_feat_ctx->max_queue_ext.max_queue_ext; 19852fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 19862fca2a98SMichal Krawczyk max_queue_ext->max_rx_cq_num); 19872fca2a98SMichal Krawczyk io_tx_sq_num = max_queue_ext->max_tx_sq_num; 19882fca2a98SMichal Krawczyk io_tx_cq_num = max_queue_ext->max_tx_cq_num; 1989ea93d37eSRafal Kozik } else { 1990ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 1991ea93d37eSRafal Kozik &get_feat_ctx->max_queues; 19922fca2a98SMichal Krawczyk io_tx_sq_num = max_queues->max_sq_num; 19932fca2a98SMichal Krawczyk io_tx_cq_num = max_queues->max_cq_num; 19942fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 1995ea93d37eSRafal Kozik } 199601bd6877SRafal Kozik 19972fca2a98SMichal Krawczyk /* In case of LLQ use the llq number in the get feature cmd */ 19982fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 19992fca2a98SMichal Krawczyk io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 20002fca2a98SMichal Krawczyk 20015920d930SMichal Krawczyk max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 20025920d930SMichal Krawczyk max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num); 20035920d930SMichal Krawczyk max_num_io_queues 
= RTE_MIN(max_num_io_queues, io_tx_cq_num); 200401bd6877SRafal Kozik 20055920d930SMichal Krawczyk if (unlikely(max_num_io_queues == 0)) { 2006617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Number of IO queues cannot not be 0\n"); 200701bd6877SRafal Kozik return -EFAULT; 200801bd6877SRafal Kozik } 200901bd6877SRafal Kozik 20105920d930SMichal Krawczyk return max_num_io_queues; 201101bd6877SRafal Kozik } 201201bd6877SRafal Kozik 2013e8c838fdSMichal Krawczyk static void 2014e8c838fdSMichal Krawczyk ena_set_offloads(struct ena_offloads *offloads, 2015e8c838fdSMichal Krawczyk struct ena_admin_feature_offload_desc *offload_desc) 2016e8c838fdSMichal Krawczyk { 2017e8c838fdSMichal Krawczyk if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 2018e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_IPV4_TSO; 2019e8c838fdSMichal Krawczyk 2020e8c838fdSMichal Krawczyk /* Tx IPv4 checksum offloads */ 2021e8c838fdSMichal Krawczyk if (offload_desc->tx & 2022e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) 2023e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L3_IPV4_CSUM; 2024e8c838fdSMichal Krawczyk if (offload_desc->tx & 2025e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) 2026e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV4_CSUM; 2027e8c838fdSMichal Krawczyk if (offload_desc->tx & 2028e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 2029e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL; 2030e8c838fdSMichal Krawczyk 2031e8c838fdSMichal Krawczyk /* Tx IPv6 checksum offloads */ 2032e8c838fdSMichal Krawczyk if (offload_desc->tx & 2033e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) 2034e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV6_CSUM; 2035e8c838fdSMichal Krawczyk if (offload_desc->tx & 2036e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) 2037e8c838fdSMichal Krawczyk offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL; 2038e8c838fdSMichal Krawczyk 2039e8c838fdSMichal Krawczyk /* Rx IPv4 checksum offloads */ 2040e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2041e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK) 2042e8c838fdSMichal Krawczyk offloads->rx_offloads |= ENA_L3_IPV4_CSUM; 2043e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2044e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 2045e8c838fdSMichal Krawczyk offloads->rx_offloads |= ENA_L4_IPV4_CSUM; 2046e8c838fdSMichal Krawczyk 2047e8c838fdSMichal Krawczyk /* Rx IPv6 checksum offloads */ 2048e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2049e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) 2050e8c838fdSMichal Krawczyk offloads->rx_offloads |= ENA_L4_IPV6_CSUM; 2051e8c838fdSMichal Krawczyk 2052e8c838fdSMichal Krawczyk if (offload_desc->rx_supported & 2053e8c838fdSMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) 2054e8c838fdSMichal Krawczyk offloads->rx_offloads |= ENA_RX_RSS_HASH; 2055e8c838fdSMichal Krawczyk } 2056e8c838fdSMichal Krawczyk 2057e3595539SStanislaw Kardach static int ena_init_once(void) 2058e3595539SStanislaw Kardach { 2059e3595539SStanislaw Kardach static bool init_done; 2060e3595539SStanislaw Kardach 2061e3595539SStanislaw Kardach if (init_done) 2062e3595539SStanislaw Kardach return 0; 2063e3595539SStanislaw Kardach 2064e3595539SStanislaw Kardach if (rte_eal_process_type() == 
RTE_PROC_PRIMARY) { 2065e3595539SStanislaw Kardach /* Init timer subsystem for the ENA timer service. */ 2066e3595539SStanislaw Kardach rte_timer_subsystem_init(); 2067e3595539SStanislaw Kardach /* Register handler for requests from secondary processes. */ 2068e3595539SStanislaw Kardach rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle); 2069e3595539SStanislaw Kardach } 2070e3595539SStanislaw Kardach 2071e3595539SStanislaw Kardach init_done = true; 2072e3595539SStanislaw Kardach return 0; 2073e3595539SStanislaw Kardach } 2074e3595539SStanislaw Kardach 20751173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 20761173fca2SJan Medala { 2077ea93d37eSRafal Kozik struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 20781173fca2SJan Medala struct rte_pci_device *pci_dev; 2079eb0ef49dSMichal Krawczyk struct rte_intr_handle *intr_handle; 2080890728ffSStephen Hemminger struct ena_adapter *adapter = eth_dev->data->dev_private; 20811173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 20821173fca2SJan Medala struct ena_com_dev_get_features_ctx get_feat_ctx; 20832fca2a98SMichal Krawczyk struct ena_llq_configurations llq_config; 20842fca2a98SMichal Krawczyk const char *queue_type_str; 20855920d930SMichal Krawczyk uint32_t max_num_io_queues; 2086ea93d37eSRafal Kozik int rc; 20871173fca2SJan Medala static int adapters_found; 208833dde075SMichal Krawczyk bool disable_meta_caching; 20891173fca2SJan Medala 20901173fca2SJan Medala eth_dev->dev_ops = &ena_dev_ops; 20911173fca2SJan Medala eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 20921173fca2SJan Medala eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 2093b3fc5a1aSKonstantin Ananyev eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 20941173fca2SJan Medala 2095e3595539SStanislaw Kardach rc = ena_init_once(); 2096e3595539SStanislaw Kardach if (rc != 0) 2097e3595539SStanislaw Kardach return rc; 2098e3595539SStanislaw Kardach 20991173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 21001173fca2SJan Medala return 0; 21011173fca2SJan Medala 2102f30e69b4SFerruh Yigit eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2103f30e69b4SFerruh Yigit 2104fd976890SMichal Krawczyk memset(adapter, 0, sizeof(struct ena_adapter)); 2105fd976890SMichal Krawczyk ena_dev = &adapter->ena_dev; 2106fd976890SMichal Krawczyk 2107aab58857SStanislaw Kardach adapter->edev_data = eth_dev->data; 2108fd976890SMichal Krawczyk 2109c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 21101173fca2SJan Medala 2111617898d1SMichal Krawczyk PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n", 21121173fca2SJan Medala pci_dev->addr.domain, 21131173fca2SJan Medala pci_dev->addr.bus, 21141173fca2SJan Medala pci_dev->addr.devid, 21151173fca2SJan Medala pci_dev->addr.function); 21161173fca2SJan Medala 2117d61138d4SHarman Kalra intr_handle = pci_dev->intr_handle; 2118eb0ef49dSMichal Krawczyk 21191173fca2SJan Medala adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 21201173fca2SJan Medala adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 21211173fca2SJan Medala 21221d339597SRafal Kozik if (!adapter->regs) { 2123617898d1SMichal Krawczyk PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n", 21241173fca2SJan Medala ENA_REGS_BAR); 21251d339597SRafal Kozik return -ENXIO; 21261d339597SRafal Kozik } 21271173fca2SJan Medala 21281173fca2SJan Medala ena_dev->reg_bar = adapter->regs; 2129850e1bb1SMichal Krawczyk /* Pass device data as a pointer which can be passed to the IO functions 2130850e1bb1SMichal Krawczyk * by the ena_com 
(for example - the memory allocation). 2131850e1bb1SMichal Krawczyk */ 2132850e1bb1SMichal Krawczyk ena_dev->dmadev = eth_dev->data; 21331173fca2SJan Medala 21341173fca2SJan Medala adapter->id_number = adapters_found; 21351173fca2SJan Medala 21361173fca2SJan Medala snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 21371173fca2SJan Medala adapter->id_number); 21381173fca2SJan Medala 2139*cc0c5d25SMichal Krawczyk adapter->missing_tx_completion_to = ENA_TX_TIMEOUT; 2140*cc0c5d25SMichal Krawczyk 21418a7a73f2SMichal Krawczyk rc = ena_parse_devargs(adapter, pci_dev->device.devargs); 21428a7a73f2SMichal Krawczyk if (rc != 0) { 21438a7a73f2SMichal Krawczyk PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); 21448a7a73f2SMichal Krawczyk goto err; 21458a7a73f2SMichal Krawczyk } 21468a7a73f2SMichal Krawczyk 21471173fca2SJan Medala /* device specific initialization routine */ 2148b9b05d6fSMichal Krawczyk rc = ena_device_init(adapter, pci_dev, &get_feat_ctx); 21491173fca2SJan Medala if (rc) { 2150617898d1SMichal Krawczyk PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); 2151241da076SRafal Kozik goto err; 21521173fca2SJan Medala } 2153b9b05d6fSMichal Krawczyk 2154b9b05d6fSMichal Krawczyk /* Check if device supports LSC */ 2155b9b05d6fSMichal Krawczyk if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) 2156b9b05d6fSMichal Krawczyk adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; 21571173fca2SJan Medala 21588a7a73f2SMichal Krawczyk set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, 21598a7a73f2SMichal Krawczyk adapter->use_large_llq_hdr); 21602fca2a98SMichal Krawczyk rc = ena_set_queues_placement_policy(adapter, ena_dev, 21612fca2a98SMichal Krawczyk &get_feat_ctx.llq, &llq_config); 21622fca2a98SMichal Krawczyk if (unlikely(rc)) { 2163617898d1SMichal Krawczyk PMD_INIT_LOG(CRIT, "Failed to set placement policy\n"); 21642fca2a98SMichal Krawczyk return rc; 21652fca2a98SMichal Krawczyk } 21662fca2a98SMichal Krawczyk 21672fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 21682fca2a98SMichal Krawczyk queue_type_str = "Regular"; 21692fca2a98SMichal Krawczyk else 21702fca2a98SMichal Krawczyk queue_type_str = "Low latency"; 21716f1c9df9SStephen Hemminger PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 2172ea93d37eSRafal Kozik 2173ea93d37eSRafal Kozik calc_queue_ctx.ena_dev = ena_dev; 2174ea93d37eSRafal Kozik calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 21751173fca2SJan Medala 21765920d930SMichal Krawczyk max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); 21778a7a73f2SMichal Krawczyk rc = ena_calc_io_queue_size(&calc_queue_ctx, 21788a7a73f2SMichal Krawczyk adapter->use_large_llq_hdr); 21795920d930SMichal Krawczyk if (unlikely((rc != 0) || (max_num_io_queues == 0))) { 2180241da076SRafal Kozik rc = -EFAULT; 2181241da076SRafal Kozik goto err_device_destroy; 2182241da076SRafal Kozik } 21831173fca2SJan Medala 21845920d930SMichal Krawczyk adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 21855920d930SMichal Krawczyk adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 2186ea93d37eSRafal Kozik adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 2187ea93d37eSRafal Kozik adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 21885920d930SMichal Krawczyk adapter->max_num_io_queues = max_num_io_queues; 21892061fe41SRafal Kozik 219033dde075SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 219133dde075SMichal Krawczyk disable_meta_caching = 219233dde075SMichal 
Krawczyk !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 219333dde075SMichal Krawczyk BIT(ENA_ADMIN_DISABLE_META_CACHING)); 219433dde075SMichal Krawczyk } else { 219533dde075SMichal Krawczyk disable_meta_caching = false; 219633dde075SMichal Krawczyk } 219733dde075SMichal Krawczyk 21981173fca2SJan Medala /* prepare ring structures */ 219933dde075SMichal Krawczyk ena_init_rings(adapter, disable_meta_caching); 22001173fca2SJan Medala 2201372c1af5SJan Medala ena_config_debug_area(adapter); 2202372c1af5SJan Medala 22031173fca2SJan Medala /* Set max MTU for this device */ 22041173fca2SJan Medala adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 22051173fca2SJan Medala 2206e8c838fdSMichal Krawczyk ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload); 220783277a7cSJakub Palider 22081173fca2SJan Medala /* Copy MAC address and point DPDK to it */ 22096d13ea8eSOlivier Matz eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 2210538da7a1SOlivier Matz rte_ether_addr_copy((struct rte_ether_addr *) 2211538da7a1SOlivier Matz get_feat_ctx.dev_attr.mac_addr, 22126d13ea8eSOlivier Matz (struct rte_ether_addr *)adapter->mac_addr); 22131173fca2SJan Medala 221434d5e97eSMichal Krawczyk rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 221534d5e97eSMichal Krawczyk if (unlikely(rc != 0)) { 221634d5e97eSMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n"); 221734d5e97eSMichal Krawczyk goto err_delete_debug_area; 221834d5e97eSMichal Krawczyk } 221934d5e97eSMichal Krawczyk 22201173fca2SJan Medala adapter->drv_stats = rte_zmalloc("adapter stats", 22211173fca2SJan Medala sizeof(*adapter->drv_stats), 22221173fca2SJan Medala RTE_CACHE_LINE_SIZE); 22231173fca2SJan Medala if (!adapter->drv_stats) { 2224617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, 2225617898d1SMichal Krawczyk "Failed to allocate memory for adapter statistics\n"); 2226241da076SRafal Kozik rc = -ENOMEM; 222734d5e97eSMichal Krawczyk goto err_rss_destroy; 22281173fca2SJan Medala } 22291173fca2SJan Medala 22301343c415SMichal Krawczyk rte_spinlock_init(&adapter->admin_lock); 22311343c415SMichal Krawczyk 2232eb0ef49dSMichal Krawczyk rte_intr_callback_register(intr_handle, 2233eb0ef49dSMichal Krawczyk ena_interrupt_handler_rte, 2234aab58857SStanislaw Kardach eth_dev); 2235eb0ef49dSMichal Krawczyk rte_intr_enable(intr_handle); 2236eb0ef49dSMichal Krawczyk ena_com_set_admin_polling_mode(ena_dev, false); 2237ca148440SMichal Krawczyk ena_com_admin_aenq_enable(ena_dev); 2238eb0ef49dSMichal Krawczyk 2239d9b8b106SMichal Krawczyk rte_timer_init(&adapter->timer_wd); 2240d9b8b106SMichal Krawczyk 22411173fca2SJan Medala adapters_found++; 22421173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_INIT; 22431173fca2SJan Medala 22441173fca2SJan Medala return 0; 2245241da076SRafal Kozik 224634d5e97eSMichal Krawczyk err_rss_destroy: 224734d5e97eSMichal Krawczyk ena_com_rss_destroy(ena_dev); 2248241da076SRafal Kozik err_delete_debug_area: 2249241da076SRafal Kozik ena_com_delete_debug_area(ena_dev); 2250241da076SRafal Kozik 2251241da076SRafal Kozik err_device_destroy: 2252241da076SRafal Kozik ena_com_delete_host_info(ena_dev); 2253241da076SRafal Kozik ena_com_admin_destroy(ena_dev); 2254241da076SRafal Kozik 2255241da076SRafal Kozik err: 2256241da076SRafal Kozik return rc; 22571173fca2SJan Medala } 22581173fca2SJan Medala 2259e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev) 2260eb0ef49dSMichal Krawczyk { 2261890728ffSStephen Hemminger struct ena_adapter *adapter = eth_dev->data->dev_private; 
2262e457bc70SRafal Kozik struct ena_com_dev *ena_dev = &adapter->ena_dev;
2263eb0ef49dSMichal Krawczyk
2264e457bc70SRafal Kozik if (adapter->state == ENA_ADAPTER_STATE_FREE)
2265e457bc70SRafal Kozik return;
2266e457bc70SRafal Kozik
2267e457bc70SRafal Kozik ena_com_set_admin_running_state(ena_dev, false);
2268eb0ef49dSMichal Krawczyk
2269eb0ef49dSMichal Krawczyk if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
2270eb0ef49dSMichal Krawczyk ena_close(eth_dev);
2271eb0ef49dSMichal Krawczyk
227234d5e97eSMichal Krawczyk ena_com_rss_destroy(ena_dev);
227334d5e97eSMichal Krawczyk
2274e457bc70SRafal Kozik ena_com_delete_debug_area(ena_dev);
2275e457bc70SRafal Kozik ena_com_delete_host_info(ena_dev);
2276e457bc70SRafal Kozik
2277e457bc70SRafal Kozik ena_com_abort_admin_commands(ena_dev);
2278e457bc70SRafal Kozik ena_com_wait_for_abort_completion(ena_dev);
2279e457bc70SRafal Kozik ena_com_admin_destroy(ena_dev);
2280e457bc70SRafal Kozik ena_com_mmio_reg_read_request_destroy(ena_dev);
2281e457bc70SRafal Kozik
2282e457bc70SRafal Kozik adapter->state = ENA_ADAPTER_STATE_FREE;
2283e457bc70SRafal Kozik }
2284e457bc70SRafal Kozik
2285e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
2286e457bc70SRafal Kozik {
2287e457bc70SRafal Kozik if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2288e457bc70SRafal Kozik return 0;
2289e457bc70SRafal Kozik
2290e457bc70SRafal Kozik ena_destroy_device(eth_dev);
2291e457bc70SRafal Kozik
2292eb0ef49dSMichal Krawczyk return 0;
2293eb0ef49dSMichal Krawczyk }
2294eb0ef49dSMichal Krawczyk
22951173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev)
22961173fca2SJan Medala {
2297890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private;
2298b9b05d6fSMichal Krawczyk int rc;
22997369f88fSRafal Kozik
23001173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_CONFIG;
23011173fca2SJan Medala
2302295968d1SFerruh Yigit if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
2303295968d1SFerruh Yigit dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
2304295968d1SFerruh Yigit dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2305b418f0d2SMichal Krawczyk
2306e2a6d08bSMichal Krawczyk /* Scattered Rx cannot be turned off in the HW, so this capability must
2307e2a6d08bSMichal Krawczyk * be forced.
2308e2a6d08bSMichal Krawczyk */
2309e2a6d08bSMichal Krawczyk dev->data->scattered_rx = 1;
2310e2a6d08bSMichal Krawczyk
2311f93e20e5SMichal Krawczyk adapter->last_tx_comp_qid = 0;
2312f93e20e5SMichal Krawczyk
2313f93e20e5SMichal Krawczyk adapter->missing_tx_completion_budget =
2314f93e20e5SMichal Krawczyk RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
2315f93e20e5SMichal Krawczyk
2316f93e20e5SMichal Krawczyk /* To avoid detecting a spurious Tx completion timeout when the
2317f93e20e5SMichal Krawczyk * application does not call the Tx cleanup function, set the timeout for
2318f93e20e5SMichal Krawczyk * the Tx queue to half of the missing completion timeout as a safety
2319f93e20e5SMichal Krawczyk * margin. If a lot of Tx completions are really missing in the queue,
2320f93e20e5SMichal Krawczyk * they will still be detected sooner or later.
2321f93e20e5SMichal Krawczyk */ 2322f93e20e5SMichal Krawczyk adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; 2323f93e20e5SMichal Krawczyk 2324b9b05d6fSMichal Krawczyk rc = ena_configure_aenq(adapter); 2325b9b05d6fSMichal Krawczyk 2326b9b05d6fSMichal Krawczyk return rc; 23271173fca2SJan Medala } 23281173fca2SJan Medala 232933dde075SMichal Krawczyk static void ena_init_rings(struct ena_adapter *adapter, 233033dde075SMichal Krawczyk bool disable_meta_caching) 23311173fca2SJan Medala { 23325920d930SMichal Krawczyk size_t i; 23331173fca2SJan Medala 23345920d930SMichal Krawczyk for (i = 0; i < adapter->max_num_io_queues; i++) { 23351173fca2SJan Medala struct ena_ring *ring = &adapter->tx_ring[i]; 23361173fca2SJan Medala 23371173fca2SJan Medala ring->configured = 0; 23381173fca2SJan Medala ring->type = ENA_RING_TYPE_TX; 23391173fca2SJan Medala ring->adapter = adapter; 23401173fca2SJan Medala ring->id = i; 23411173fca2SJan Medala ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 23421173fca2SJan Medala ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 23432061fe41SRafal Kozik ring->sgl_size = adapter->max_tx_sgl_size; 234433dde075SMichal Krawczyk ring->disable_meta_caching = disable_meta_caching; 23451173fca2SJan Medala } 23461173fca2SJan Medala 23475920d930SMichal Krawczyk for (i = 0; i < adapter->max_num_io_queues; i++) { 23481173fca2SJan Medala struct ena_ring *ring = &adapter->rx_ring[i]; 23491173fca2SJan Medala 23501173fca2SJan Medala ring->configured = 0; 23511173fca2SJan Medala ring->type = ENA_RING_TYPE_RX; 23521173fca2SJan Medala ring->adapter = adapter; 23531173fca2SJan Medala ring->id = i; 2354ea93d37eSRafal Kozik ring->sgl_size = adapter->max_rx_sgl_size; 23551173fca2SJan Medala } 23561173fca2SJan Medala } 23571173fca2SJan Medala 23583a822d79SMichal Krawczyk static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter) 23593a822d79SMichal Krawczyk { 23603a822d79SMichal Krawczyk uint64_t port_offloads = 0; 23613a822d79SMichal Krawczyk 23623a822d79SMichal Krawczyk if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM) 2363295968d1SFerruh Yigit port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM; 23643a822d79SMichal Krawczyk 23653a822d79SMichal Krawczyk if (adapter->offloads.rx_offloads & 23663a822d79SMichal Krawczyk (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM)) 23673a822d79SMichal Krawczyk port_offloads |= 2368295968d1SFerruh Yigit RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM; 23693a822d79SMichal Krawczyk 23703a822d79SMichal Krawczyk if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH) 2371295968d1SFerruh Yigit port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 23723a822d79SMichal Krawczyk 2373295968d1SFerruh Yigit port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 2374e2a6d08bSMichal Krawczyk 23753a822d79SMichal Krawczyk return port_offloads; 23763a822d79SMichal Krawczyk } 23773a822d79SMichal Krawczyk 23783a822d79SMichal Krawczyk static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter) 23793a822d79SMichal Krawczyk { 23803a822d79SMichal Krawczyk uint64_t port_offloads = 0; 23813a822d79SMichal Krawczyk 23823a822d79SMichal Krawczyk if (adapter->offloads.tx_offloads & ENA_IPV4_TSO) 2383295968d1SFerruh Yigit port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; 23843a822d79SMichal Krawczyk 23853a822d79SMichal Krawczyk if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM) 2386295968d1SFerruh Yigit port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; 23873a822d79SMichal Krawczyk if (adapter->offloads.tx_offloads & 23883a822d79SMichal Krawczyk 
(ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | 23893a822d79SMichal Krawczyk ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL)) 23903a822d79SMichal Krawczyk port_offloads |= 2391295968d1SFerruh Yigit RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM; 23923a822d79SMichal Krawczyk 2393295968d1SFerruh Yigit port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 23943a822d79SMichal Krawczyk 23953a822d79SMichal Krawczyk return port_offloads; 23963a822d79SMichal Krawczyk } 23973a822d79SMichal Krawczyk 23983a822d79SMichal Krawczyk static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter) 23993a822d79SMichal Krawczyk { 24003a822d79SMichal Krawczyk RTE_SET_USED(adapter); 24013a822d79SMichal Krawczyk 24023a822d79SMichal Krawczyk return 0; 24033a822d79SMichal Krawczyk } 24043a822d79SMichal Krawczyk 24053a822d79SMichal Krawczyk static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter) 24063a822d79SMichal Krawczyk { 24073a822d79SMichal Krawczyk RTE_SET_USED(adapter); 24083a822d79SMichal Krawczyk 24093a822d79SMichal Krawczyk return 0; 24103a822d79SMichal Krawczyk } 24113a822d79SMichal Krawczyk 2412bdad90d1SIvan Ilchenko static int ena_infos_get(struct rte_eth_dev *dev, 24131173fca2SJan Medala struct rte_eth_dev_info *dev_info) 24141173fca2SJan Medala { 24151173fca2SJan Medala struct ena_adapter *adapter; 24161173fca2SJan Medala struct ena_com_dev *ena_dev; 24171173fca2SJan Medala 2418498c687aSRafal Kozik ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 2419498c687aSRafal Kozik ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 2420890728ffSStephen Hemminger adapter = dev->data->dev_private; 24211173fca2SJan Medala 24221173fca2SJan Medala ena_dev = &adapter->ena_dev; 2423498c687aSRafal Kozik ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 24241173fca2SJan Medala 2425e274f573SMarc Sune dev_info->speed_capa = 2426295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_1G | 2427295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_2_5G | 2428295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_5G | 2429295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_10G | 2430295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_25G | 2431295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_40G | 2432295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_50G | 2433295968d1SFerruh Yigit RTE_ETH_LINK_SPEED_100G; 2434e274f573SMarc Sune 24351173fca2SJan Medala /* Inform framework about available features */ 24363a822d79SMichal Krawczyk dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter); 24373a822d79SMichal Krawczyk dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter); 24383a822d79SMichal Krawczyk dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter); 24393a822d79SMichal Krawczyk dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter); 24401173fca2SJan Medala 244134d5e97eSMichal Krawczyk dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF; 244234d5e97eSMichal Krawczyk dev_info->hash_key_size = ENA_HASH_KEY_SIZE; 2443b01ead20SRafal Kozik 24441173fca2SJan Medala dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 24451bb4a528SFerruh Yigit dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN + 24461bb4a528SFerruh Yigit RTE_ETHER_CRC_LEN; 24471bb4a528SFerruh Yigit dev_info->min_mtu = ENA_MIN_MTU; 24481bb4a528SFerruh Yigit dev_info->max_mtu = adapter->max_mtu; 24491173fca2SJan Medala dev_info->max_mac_addrs = 1; 24501173fca2SJan Medala 24515920d930SMichal Krawczyk dev_info->max_rx_queues = adapter->max_num_io_queues; 24525920d930SMichal Krawczyk dev_info->max_tx_queues = adapter->max_num_io_queues; 
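/* Note (editor): the RSS table size reported below is a driver constant,
 * while the descriptor and SGL limits that follow come from the values
 * computed during device initialization (ena_calc_io_queue_size()).
 */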
24531173fca2SJan Medala dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 245456b8b9b7SRafal Kozik 24555920d930SMichal Krawczyk dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 245692680dc2SRafal Kozik dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2457ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2458ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 2459ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2460ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 246192680dc2SRafal Kozik 24625920d930SMichal Krawczyk dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 246392680dc2SRafal Kozik dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 246492680dc2SRafal Kozik dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2465ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 246692680dc2SRafal Kozik dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2467ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 2468bdad90d1SIvan Ilchenko 246930a6c7efSStanislaw Kardach dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE; 247030a6c7efSStanislaw Kardach dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE; 247130a6c7efSStanislaw Kardach 2472bdad90d1SIvan Ilchenko return 0; 24731173fca2SJan Medala } 24741173fca2SJan Medala 24751be097dcSMichal Krawczyk static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 24761be097dcSMichal Krawczyk { 24771be097dcSMichal Krawczyk mbuf->data_len = len; 24781be097dcSMichal Krawczyk mbuf->data_off = RTE_PKTMBUF_HEADROOM; 24791be097dcSMichal Krawczyk mbuf->refcnt = 1; 24801be097dcSMichal Krawczyk mbuf->next = NULL; 24811be097dcSMichal Krawczyk } 24821be097dcSMichal Krawczyk 24831be097dcSMichal Krawczyk static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 24841be097dcSMichal Krawczyk struct ena_com_rx_buf_info *ena_bufs, 24851be097dcSMichal Krawczyk uint32_t descs, 24861be097dcSMichal Krawczyk uint16_t *next_to_clean, 24871be097dcSMichal Krawczyk uint8_t offset) 24881be097dcSMichal Krawczyk { 24891be097dcSMichal Krawczyk struct rte_mbuf *mbuf; 24901be097dcSMichal Krawczyk struct rte_mbuf *mbuf_head; 24911be097dcSMichal Krawczyk struct ena_rx_buffer *rx_info; 249283fd97b2SMichal Krawczyk int rc; 24931be097dcSMichal Krawczyk uint16_t ntc, len, req_id, buf = 0; 24941be097dcSMichal Krawczyk 24951be097dcSMichal Krawczyk if (unlikely(descs == 0)) 24961be097dcSMichal Krawczyk return NULL; 24971be097dcSMichal Krawczyk 24981be097dcSMichal Krawczyk ntc = *next_to_clean; 24991be097dcSMichal Krawczyk 25001be097dcSMichal Krawczyk len = ena_bufs[buf].len; 25011be097dcSMichal Krawczyk req_id = ena_bufs[buf].req_id; 25021be097dcSMichal Krawczyk 25031be097dcSMichal Krawczyk rx_info = &rx_ring->rx_buffer_info[req_id]; 25041be097dcSMichal Krawczyk 25051be097dcSMichal Krawczyk mbuf = rx_info->mbuf; 25061be097dcSMichal Krawczyk RTE_ASSERT(mbuf != NULL); 25071be097dcSMichal Krawczyk 25081be097dcSMichal Krawczyk ena_init_rx_mbuf(mbuf, len); 25091be097dcSMichal Krawczyk 25101be097dcSMichal Krawczyk /* Fill the mbuf head with the data specific for 1st segment. 
*/ 25111be097dcSMichal Krawczyk mbuf_head = mbuf; 25121be097dcSMichal Krawczyk mbuf_head->nb_segs = descs; 25131be097dcSMichal Krawczyk mbuf_head->port = rx_ring->port_id; 25141be097dcSMichal Krawczyk mbuf_head->pkt_len = len; 25151be097dcSMichal Krawczyk mbuf_head->data_off += offset; 25161be097dcSMichal Krawczyk 25171be097dcSMichal Krawczyk rx_info->mbuf = NULL; 2518c0006061SMichal Krawczyk rx_ring->empty_rx_reqs[ntc] = req_id; 2519c0006061SMichal Krawczyk ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 25201be097dcSMichal Krawczyk 25211be097dcSMichal Krawczyk while (--descs) { 25221be097dcSMichal Krawczyk ++buf; 25231be097dcSMichal Krawczyk len = ena_bufs[buf].len; 25241be097dcSMichal Krawczyk req_id = ena_bufs[buf].req_id; 25251be097dcSMichal Krawczyk 25261be097dcSMichal Krawczyk rx_info = &rx_ring->rx_buffer_info[req_id]; 25271be097dcSMichal Krawczyk RTE_ASSERT(rx_info->mbuf != NULL); 25281be097dcSMichal Krawczyk 252983fd97b2SMichal Krawczyk if (unlikely(len == 0)) { 253083fd97b2SMichal Krawczyk /* 253183fd97b2SMichal Krawczyk * Some devices can pass descriptor with the length 0. 253283fd97b2SMichal Krawczyk * To avoid confusion, the PMD is simply putting the 253383fd97b2SMichal Krawczyk * descriptor back, as it was never used. We'll avoid 253483fd97b2SMichal Krawczyk * mbuf allocation that way. 253583fd97b2SMichal Krawczyk */ 253683fd97b2SMichal Krawczyk rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 253783fd97b2SMichal Krawczyk rx_info->mbuf, req_id); 253883fd97b2SMichal Krawczyk if (unlikely(rc != 0)) { 253983fd97b2SMichal Krawczyk /* Free the mbuf in case of an error. */ 254083fd97b2SMichal Krawczyk rte_mbuf_raw_free(rx_info->mbuf); 254183fd97b2SMichal Krawczyk } else { 254283fd97b2SMichal Krawczyk /* 254383fd97b2SMichal Krawczyk * If there was no error, just exit the loop as 254483fd97b2SMichal Krawczyk * 0 length descriptor is always the last one. 254583fd97b2SMichal Krawczyk */ 254683fd97b2SMichal Krawczyk break; 254783fd97b2SMichal Krawczyk } 254883fd97b2SMichal Krawczyk } else { 25491be097dcSMichal Krawczyk /* Create an mbuf chain. */ 25501be097dcSMichal Krawczyk mbuf->next = rx_info->mbuf; 25511be097dcSMichal Krawczyk mbuf = mbuf->next; 25521be097dcSMichal Krawczyk 25531be097dcSMichal Krawczyk ena_init_rx_mbuf(mbuf, len); 25541be097dcSMichal Krawczyk mbuf_head->pkt_len += len; 255583fd97b2SMichal Krawczyk } 25561be097dcSMichal Krawczyk 255783fd97b2SMichal Krawczyk /* 255883fd97b2SMichal Krawczyk * Mark the descriptor as depleted and perform necessary 255983fd97b2SMichal Krawczyk * cleanup. 256083fd97b2SMichal Krawczyk * This code will execute in two cases: 256183fd97b2SMichal Krawczyk * 1. Descriptor len was greater than 0 - normal situation. 256283fd97b2SMichal Krawczyk * 2. Descriptor len was 0 and we failed to add the descriptor 256383fd97b2SMichal Krawczyk * to the device. In that situation, we should try to add 256483fd97b2SMichal Krawczyk * the mbuf again in the populate routine and mark the 256583fd97b2SMichal Krawczyk * descriptor as used up by the device. 
256683fd97b2SMichal Krawczyk */ 25671be097dcSMichal Krawczyk rx_info->mbuf = NULL; 2568c0006061SMichal Krawczyk rx_ring->empty_rx_reqs[ntc] = req_id; 2569c0006061SMichal Krawczyk ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 25701be097dcSMichal Krawczyk } 25711be097dcSMichal Krawczyk 25721be097dcSMichal Krawczyk *next_to_clean = ntc; 25731be097dcSMichal Krawczyk 25741be097dcSMichal Krawczyk return mbuf_head; 25751be097dcSMichal Krawczyk } 25761be097dcSMichal Krawczyk 25771173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 25781173fca2SJan Medala uint16_t nb_pkts) 25791173fca2SJan Medala { 25801173fca2SJan Medala struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 258177550607SMichal Krawczyk unsigned int free_queue_entries; 25821173fca2SJan Medala uint16_t next_to_clean = rx_ring->next_to_clean; 258374456796SMichal Krawczyk uint16_t descs_in_use; 25841be097dcSMichal Krawczyk struct rte_mbuf *mbuf; 25851be097dcSMichal Krawczyk uint16_t completed; 25861173fca2SJan Medala struct ena_com_rx_ctx ena_rx_ctx; 25871be097dcSMichal Krawczyk int i, rc = 0; 258834d5e97eSMichal Krawczyk bool fill_hash; 25891173fca2SJan Medala 25900a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX 25911173fca2SJan Medala /* Check adapter state */ 25921173fca2SJan Medala if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 25930a001d69SMichal Krawczyk PMD_RX_LOG(ALERT, 25941173fca2SJan Medala "Trying to receive pkts while device is NOT running\n"); 25951173fca2SJan Medala return 0; 25961173fca2SJan Medala } 25970a001d69SMichal Krawczyk #endif 25981173fca2SJan Medala 2599295968d1SFerruh Yigit fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH; 260034d5e97eSMichal Krawczyk 2601c0006061SMichal Krawczyk descs_in_use = rx_ring->ring_size - 260274456796SMichal Krawczyk ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 260374456796SMichal Krawczyk nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 26041173fca2SJan Medala 26051173fca2SJan Medala for (completed = 0; completed < nb_pkts; completed++) { 2606ea93d37eSRafal Kozik ena_rx_ctx.max_bufs = rx_ring->sgl_size; 26071173fca2SJan Medala ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 26081173fca2SJan Medala ena_rx_ctx.descs = 0; 26097b3a3c4bSMaciej Bielski ena_rx_ctx.pkt_offset = 0; 26101173fca2SJan Medala /* receive packet context */ 26111173fca2SJan Medala rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 26121173fca2SJan Medala rx_ring->ena_com_io_sq, 26131173fca2SJan Medala &ena_rx_ctx); 26141173fca2SJan Medala if (unlikely(rc)) { 26150a001d69SMichal Krawczyk PMD_RX_LOG(ERR, 2616617898d1SMichal Krawczyk "Failed to get the packet from the device, rc: %d\n", 2617617898d1SMichal Krawczyk rc); 261805cffdcfSMichal Krawczyk if (rc == ENA_COM_NO_SPACE) { 261905cffdcfSMichal Krawczyk ++rx_ring->rx_stats.bad_desc_num; 26202bae75eaSDawid Gorecki ena_trigger_reset(rx_ring->adapter, 26212bae75eaSDawid Gorecki ENA_REGS_RESET_TOO_MANY_RX_DESCS); 262205cffdcfSMichal Krawczyk } else { 262305cffdcfSMichal Krawczyk ++rx_ring->rx_stats.bad_req_id; 26242bae75eaSDawid Gorecki ena_trigger_reset(rx_ring->adapter, 26252bae75eaSDawid Gorecki ENA_REGS_RESET_INV_RX_REQ_ID); 262605cffdcfSMichal Krawczyk } 26271173fca2SJan Medala return 0; 26281173fca2SJan Medala } 26291173fca2SJan Medala 26301be097dcSMichal Krawczyk mbuf = ena_rx_mbuf(rx_ring, 26311be097dcSMichal Krawczyk ena_rx_ctx.ena_bufs, 26321be097dcSMichal Krawczyk ena_rx_ctx.descs, 26331be097dcSMichal Krawczyk &next_to_clean, 26341be097dcSMichal Krawczyk ena_rx_ctx.pkt_offset); 
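/* If no mbuf chain could be built for this packet, return the req_ids of
 * the received descriptors to the free list and finish the burst early.
 */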
26351be097dcSMichal Krawczyk if (unlikely(mbuf == NULL)) { 26361be097dcSMichal Krawczyk for (i = 0; i < ena_rx_ctx.descs; ++i) { 2637c0006061SMichal Krawczyk rx_ring->empty_rx_reqs[next_to_clean] = 26381be097dcSMichal Krawczyk rx_ring->ena_bufs[i].req_id; 2639c0006061SMichal Krawczyk next_to_clean = ENA_IDX_NEXT_MASKED( 2640c0006061SMichal Krawczyk next_to_clean, rx_ring->size_mask); 26411173fca2SJan Medala } 2642f00930d9SRafal Kozik break; 26431be097dcSMichal Krawczyk } 26441173fca2SJan Medala 26451173fca2SJan Medala /* fill mbuf attributes if any */ 264684daba99SMichal Krawczyk ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash); 26477830e905SSolganik Alexander 26481be097dcSMichal Krawczyk if (unlikely(mbuf->ol_flags & 264984daba99SMichal Krawczyk (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) 2650ef74b5f7SMichal Krawczyk rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 26517830e905SSolganik Alexander 26521be097dcSMichal Krawczyk rx_pkts[completed] = mbuf; 26531be097dcSMichal Krawczyk rx_ring->rx_stats.bytes += mbuf->pkt_len; 26541173fca2SJan Medala } 26551173fca2SJan Medala 26561be097dcSMichal Krawczyk rx_ring->rx_stats.cnt += completed; 2657ec78af6bSMichal Krawczyk rx_ring->next_to_clean = next_to_clean; 2658ec78af6bSMichal Krawczyk 265977550607SMichal Krawczyk free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 266077550607SMichal Krawczyk 26611173fca2SJan Medala /* Burst refill to save doorbells, memory barriers, const interval */ 2662005064e5SMichal Krawczyk if (free_queue_entries >= rx_ring->rx_free_thresh) { 2663a45462c5SRafal Kozik ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 266477550607SMichal Krawczyk ena_populate_rx_queue(rx_ring, free_queue_entries); 2665a45462c5SRafal Kozik } 26661173fca2SJan Medala 26671be097dcSMichal Krawczyk return completed; 26681173fca2SJan Medala } 26691173fca2SJan Medala 2670b3fc5a1aSKonstantin Ananyev static uint16_t 267183277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2672b3fc5a1aSKonstantin Ananyev uint16_t nb_pkts) 2673b3fc5a1aSKonstantin Ananyev { 2674b3fc5a1aSKonstantin Ananyev int32_t ret; 2675b3fc5a1aSKonstantin Ananyev uint32_t i; 2676b3fc5a1aSKonstantin Ananyev struct rte_mbuf *m; 267783277a7cSJakub Palider struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2678e8c838fdSMichal Krawczyk struct ena_adapter *adapter = tx_ring->adapter; 2679a7c528e5SOlivier Matz struct rte_ipv4_hdr *ip_hdr; 2680b3fc5a1aSKonstantin Ananyev uint64_t ol_flags; 2681e8c838fdSMichal Krawczyk uint64_t l4_csum_flag; 2682e8c838fdSMichal Krawczyk uint64_t dev_offload_capa; 268383277a7cSJakub Palider uint16_t frag_field; 2684e8c838fdSMichal Krawczyk bool need_pseudo_csum; 268583277a7cSJakub Palider 2686e8c838fdSMichal Krawczyk dev_offload_capa = adapter->offloads.tx_offloads; 2687b3fc5a1aSKonstantin Ananyev for (i = 0; i != nb_pkts; i++) { 2688b3fc5a1aSKonstantin Ananyev m = tx_pkts[i]; 2689b3fc5a1aSKonstantin Ananyev ol_flags = m->ol_flags; 2690b3fc5a1aSKonstantin Ananyev 2691e8c838fdSMichal Krawczyk /* Check if any offload flag was set */ 2692e8c838fdSMichal Krawczyk if (ol_flags == 0) 2693bc5ef57dSMichal Krawczyk continue; 2694bc5ef57dSMichal Krawczyk 2695daa02b5cSOlivier Matz l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK; 2696e8c838fdSMichal Krawczyk /* SCTP checksum offload is not supported by the ENA. 
*/ 2697e8c838fdSMichal Krawczyk if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) || 2698daa02b5cSOlivier Matz l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) { 2699e8c838fdSMichal Krawczyk PMD_TX_LOG(DEBUG, 2700e8c838fdSMichal Krawczyk "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n", 2701e8c838fdSMichal Krawczyk i, ol_flags); 2702baeed5f4SMichal Krawczyk rte_errno = ENOTSUP; 2703b3fc5a1aSKonstantin Ananyev return i; 2704b3fc5a1aSKonstantin Ananyev } 2705b3fc5a1aSKonstantin Ananyev 270696ffa8a7SMichal Krawczyk if (unlikely(m->nb_segs >= tx_ring->sgl_size && 270796ffa8a7SMichal Krawczyk !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 270896ffa8a7SMichal Krawczyk m->nb_segs == tx_ring->sgl_size && 270996ffa8a7SMichal Krawczyk m->data_len < tx_ring->tx_max_header_size))) { 271096ffa8a7SMichal Krawczyk PMD_TX_LOG(DEBUG, 271196ffa8a7SMichal Krawczyk "mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n", 271296ffa8a7SMichal Krawczyk i, m->nb_segs); 271396ffa8a7SMichal Krawczyk rte_errno = EINVAL; 271496ffa8a7SMichal Krawczyk return i; 271596ffa8a7SMichal Krawczyk } 271696ffa8a7SMichal Krawczyk 2717b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2718e8c838fdSMichal Krawczyk /* Check if requested offload is also enabled for the queue */ 2719daa02b5cSOlivier Matz if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2720295968d1SFerruh Yigit !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) || 2721daa02b5cSOlivier Matz (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM && 2722295968d1SFerruh Yigit !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) || 2723daa02b5cSOlivier Matz (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM && 2724295968d1SFerruh Yigit !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) { 2725e8c838fdSMichal Krawczyk PMD_TX_LOG(DEBUG, 2726e8c838fdSMichal Krawczyk "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n", 2727e8c838fdSMichal Krawczyk i, m->nb_segs, tx_ring->id); 2728e8c838fdSMichal Krawczyk rte_errno = EINVAL; 2729e8c838fdSMichal Krawczyk return i; 2730e8c838fdSMichal Krawczyk } 2731e8c838fdSMichal Krawczyk 2732e8c838fdSMichal Krawczyk /* The caller is obligated to set l2 and l3 len if any cksum 2733e8c838fdSMichal Krawczyk * offload is enabled. 2734e8c838fdSMichal Krawczyk */ 2735daa02b5cSOlivier Matz if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) && 2736e8c838fdSMichal Krawczyk (m->l2_len == 0 || m->l3_len == 0))) { 2737e8c838fdSMichal Krawczyk PMD_TX_LOG(DEBUG, 2738e8c838fdSMichal Krawczyk "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n", 2739e8c838fdSMichal Krawczyk i); 2740e8c838fdSMichal Krawczyk rte_errno = EINVAL; 2741e8c838fdSMichal Krawczyk return i; 2742e8c838fdSMichal Krawczyk } 2743b3fc5a1aSKonstantin Ananyev ret = rte_validate_tx_offload(m); 2744b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2745baeed5f4SMichal Krawczyk rte_errno = -ret; 2746b3fc5a1aSKonstantin Ananyev return i; 2747b3fc5a1aSKonstantin Ananyev } 2748b3fc5a1aSKonstantin Ananyev #endif 274983277a7cSJakub Palider 2750e8c838fdSMichal Krawczyk /* Verify HW support for requested offloads and determine if 2751e8c838fdSMichal Krawczyk * pseudo header checksum is needed. 
275283277a7cSJakub Palider */ 2753e8c838fdSMichal Krawczyk need_pseudo_csum = false; 2754daa02b5cSOlivier Matz if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2755daa02b5cSOlivier Matz if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2756e8c838fdSMichal Krawczyk !(dev_offload_capa & ENA_L3_IPV4_CSUM)) { 2757e8c838fdSMichal Krawczyk rte_errno = ENOTSUP; 2758e8c838fdSMichal Krawczyk return i; 2759e8c838fdSMichal Krawczyk } 276083277a7cSJakub Palider 2761daa02b5cSOlivier Matz if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && 2762e8c838fdSMichal Krawczyk !(dev_offload_capa & ENA_IPV4_TSO)) { 2763e8c838fdSMichal Krawczyk rte_errno = ENOTSUP; 2764e8c838fdSMichal Krawczyk return i; 2765e8c838fdSMichal Krawczyk } 2766e8c838fdSMichal Krawczyk 2767e8c838fdSMichal Krawczyk /* Check HW capabilities and if pseudo csum is needed 2768e8c838fdSMichal Krawczyk * for L4 offloads. 2769e8c838fdSMichal Krawczyk */ 2770daa02b5cSOlivier Matz if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2771e8c838fdSMichal Krawczyk !(dev_offload_capa & ENA_L4_IPV4_CSUM)) { 2772e8c838fdSMichal Krawczyk if (dev_offload_capa & 2773e8c838fdSMichal Krawczyk ENA_L4_IPV4_CSUM_PARTIAL) { 2774e8c838fdSMichal Krawczyk need_pseudo_csum = true; 2775e8c838fdSMichal Krawczyk } else { 2776e8c838fdSMichal Krawczyk rte_errno = ENOTSUP; 2777e8c838fdSMichal Krawczyk return i; 2778e8c838fdSMichal Krawczyk } 2779e8c838fdSMichal Krawczyk } 2780e8c838fdSMichal Krawczyk 2781e8c838fdSMichal Krawczyk /* Parse the DF flag */ 2782e8c838fdSMichal Krawczyk ip_hdr = rte_pktmbuf_mtod_offset(m, 2783e8c838fdSMichal Krawczyk struct rte_ipv4_hdr *, m->l2_len); 2784e8c838fdSMichal Krawczyk frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2785e8c838fdSMichal Krawczyk if (frag_field & RTE_IPV4_HDR_DF_FLAG) { 2786e8c838fdSMichal Krawczyk m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2787daa02b5cSOlivier Matz } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2788e8c838fdSMichal Krawczyk /* In case we are supposed to TSO and have DF 2789e8c838fdSMichal Krawczyk * not set (DF=0) hardware must be provided with 2790e8c838fdSMichal Krawczyk * partial checksum. 2791e8c838fdSMichal Krawczyk */ 2792e8c838fdSMichal Krawczyk need_pseudo_csum = true; 2793e8c838fdSMichal Krawczyk } 2794daa02b5cSOlivier Matz } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2795e8c838fdSMichal Krawczyk /* There is no support for IPv6 TSO as for now. 
*/ 2796daa02b5cSOlivier Matz if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2797e8c838fdSMichal Krawczyk rte_errno = ENOTSUP; 2798e8c838fdSMichal Krawczyk return i; 2799e8c838fdSMichal Krawczyk } 2800e8c838fdSMichal Krawczyk 2801e8c838fdSMichal Krawczyk /* Check HW capabilities and if pseudo csum is needed */ 2802daa02b5cSOlivier Matz if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2803e8c838fdSMichal Krawczyk !(dev_offload_capa & ENA_L4_IPV6_CSUM)) { 2804e8c838fdSMichal Krawczyk if (dev_offload_capa & 2805e8c838fdSMichal Krawczyk ENA_L4_IPV6_CSUM_PARTIAL) { 2806e8c838fdSMichal Krawczyk need_pseudo_csum = true; 2807e8c838fdSMichal Krawczyk } else { 2808e8c838fdSMichal Krawczyk rte_errno = ENOTSUP; 2809e8c838fdSMichal Krawczyk return i; 2810e8c838fdSMichal Krawczyk } 2811e8c838fdSMichal Krawczyk } 2812e8c838fdSMichal Krawczyk } 2813e8c838fdSMichal Krawczyk 2814e8c838fdSMichal Krawczyk if (need_pseudo_csum) { 2815e8c838fdSMichal Krawczyk ret = rte_net_intel_cksum_flags_prepare(m, ol_flags); 2816b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2817baeed5f4SMichal Krawczyk rte_errno = -ret; 2818b3fc5a1aSKonstantin Ananyev return i; 2819b3fc5a1aSKonstantin Ananyev } 2820b3fc5a1aSKonstantin Ananyev } 2821e8c838fdSMichal Krawczyk } 2822b3fc5a1aSKonstantin Ananyev 2823b3fc5a1aSKonstantin Ananyev return i; 2824b3fc5a1aSKonstantin Ananyev } 2825b3fc5a1aSKonstantin Ananyev 2826f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter, 2827f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints) 2828f01f060cSRafal Kozik { 2829f01f060cSRafal Kozik if (hints->admin_completion_tx_timeout) 2830f01f060cSRafal Kozik adapter->ena_dev.admin_queue.completion_timeout = 2831f01f060cSRafal Kozik hints->admin_completion_tx_timeout * 1000; 2832f01f060cSRafal Kozik 2833f01f060cSRafal Kozik if (hints->mmio_read_timeout) 2834f01f060cSRafal Kozik /* convert to usec */ 2835f01f060cSRafal Kozik adapter->ena_dev.mmio_read.reg_read_to = 2836f01f060cSRafal Kozik hints->mmio_read_timeout * 1000; 2837d9b8b106SMichal Krawczyk 2838d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout) { 2839d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2840d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2841d9b8b106SMichal Krawczyk else 2842d9b8b106SMichal Krawczyk // Convert msecs to ticks 2843d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = 2844d9b8b106SMichal Krawczyk (hints->driver_watchdog_timeout * 2845d9b8b106SMichal Krawczyk rte_get_timer_hz()) / 1000; 2846d9b8b106SMichal Krawczyk } 2847f01f060cSRafal Kozik } 2848f01f060cSRafal Kozik 284936278b82SMichal Krawczyk static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 285036278b82SMichal Krawczyk struct ena_tx_buffer *tx_info, 285136278b82SMichal Krawczyk struct rte_mbuf *mbuf, 285236278b82SMichal Krawczyk void **push_header, 285336278b82SMichal Krawczyk uint16_t *header_len) 285436278b82SMichal Krawczyk { 285536278b82SMichal Krawczyk struct ena_com_buf *ena_buf; 285636278b82SMichal Krawczyk uint16_t delta, seg_len, push_len; 285736278b82SMichal Krawczyk 285836278b82SMichal Krawczyk delta = 0; 285936278b82SMichal Krawczyk seg_len = mbuf->data_len; 286036278b82SMichal Krawczyk 286136278b82SMichal Krawczyk tx_info->mbuf = mbuf; 286236278b82SMichal Krawczyk ena_buf = tx_info->bufs; 286336278b82SMichal Krawczyk 286436278b82SMichal Krawczyk if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 286536278b82SMichal Krawczyk /* 286636278b82SMichal Krawczyk * Tx header might be (and will 
be in most cases) smaller than 286736278b82SMichal Krawczyk * tx_max_header_size. But it's not an issue to send more data 286836278b82SMichal Krawczyk * to the device, than actually needed if the mbuf size is 286936278b82SMichal Krawczyk * greater than tx_max_header_size. 287036278b82SMichal Krawczyk */ 287136278b82SMichal Krawczyk push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); 287236278b82SMichal Krawczyk *header_len = push_len; 287336278b82SMichal Krawczyk 287436278b82SMichal Krawczyk if (likely(push_len <= seg_len)) { 287536278b82SMichal Krawczyk /* If the push header is in the single segment, then 287636278b82SMichal Krawczyk * just point it to the 1st mbuf data. 287736278b82SMichal Krawczyk */ 287836278b82SMichal Krawczyk *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); 287936278b82SMichal Krawczyk } else { 288036278b82SMichal Krawczyk /* If the push header lays in the several segments, copy 288136278b82SMichal Krawczyk * it to the intermediate buffer. 288236278b82SMichal Krawczyk */ 288336278b82SMichal Krawczyk rte_pktmbuf_read(mbuf, 0, push_len, 288436278b82SMichal Krawczyk tx_ring->push_buf_intermediate_buf); 288536278b82SMichal Krawczyk *push_header = tx_ring->push_buf_intermediate_buf; 288636278b82SMichal Krawczyk delta = push_len - seg_len; 288736278b82SMichal Krawczyk } 288836278b82SMichal Krawczyk } else { 288936278b82SMichal Krawczyk *push_header = NULL; 289036278b82SMichal Krawczyk *header_len = 0; 289136278b82SMichal Krawczyk push_len = 0; 289236278b82SMichal Krawczyk } 289336278b82SMichal Krawczyk 289436278b82SMichal Krawczyk /* Process first segment taking into consideration pushed header */ 289536278b82SMichal Krawczyk if (seg_len > push_len) { 289636278b82SMichal Krawczyk ena_buf->paddr = mbuf->buf_iova + 289736278b82SMichal Krawczyk mbuf->data_off + 289836278b82SMichal Krawczyk push_len; 289936278b82SMichal Krawczyk ena_buf->len = seg_len - push_len; 290036278b82SMichal Krawczyk ena_buf++; 290136278b82SMichal Krawczyk tx_info->num_of_bufs++; 290236278b82SMichal Krawczyk } 290336278b82SMichal Krawczyk 290436278b82SMichal Krawczyk while ((mbuf = mbuf->next) != NULL) { 290536278b82SMichal Krawczyk seg_len = mbuf->data_len; 290636278b82SMichal Krawczyk 290736278b82SMichal Krawczyk /* Skip mbufs if whole data is pushed as a header */ 290836278b82SMichal Krawczyk if (unlikely(delta > seg_len)) { 290936278b82SMichal Krawczyk delta -= seg_len; 291036278b82SMichal Krawczyk continue; 291136278b82SMichal Krawczyk } 291236278b82SMichal Krawczyk 291336278b82SMichal Krawczyk ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 291436278b82SMichal Krawczyk ena_buf->len = seg_len - delta; 291536278b82SMichal Krawczyk ena_buf++; 291636278b82SMichal Krawczyk tx_info->num_of_bufs++; 291736278b82SMichal Krawczyk 291836278b82SMichal Krawczyk delta = 0; 291936278b82SMichal Krawczyk } 292036278b82SMichal Krawczyk } 292136278b82SMichal Krawczyk 292236278b82SMichal Krawczyk static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 292336278b82SMichal Krawczyk { 292436278b82SMichal Krawczyk struct ena_tx_buffer *tx_info; 292536278b82SMichal Krawczyk struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 292636278b82SMichal Krawczyk uint16_t next_to_use; 292736278b82SMichal Krawczyk uint16_t header_len; 292836278b82SMichal Krawczyk uint16_t req_id; 292936278b82SMichal Krawczyk void *push_header; 293036278b82SMichal Krawczyk int nb_hw_desc; 293136278b82SMichal Krawczyk int rc; 293236278b82SMichal Krawczyk 293396ffa8a7SMichal Krawczyk /* Checking for space for 2 
additional metadata descriptors due to 293496ffa8a7SMichal Krawczyk * possible header split and metadata descriptor 293596ffa8a7SMichal Krawczyk */ 293696ffa8a7SMichal Krawczyk if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 293796ffa8a7SMichal Krawczyk mbuf->nb_segs + 2)) { 293896ffa8a7SMichal Krawczyk PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); 293996ffa8a7SMichal Krawczyk return ENA_COM_NO_MEM; 294096ffa8a7SMichal Krawczyk } 294136278b82SMichal Krawczyk 294236278b82SMichal Krawczyk next_to_use = tx_ring->next_to_use; 294336278b82SMichal Krawczyk 294436278b82SMichal Krawczyk req_id = tx_ring->empty_tx_reqs[next_to_use]; 294536278b82SMichal Krawczyk tx_info = &tx_ring->tx_buffer_info[req_id]; 294636278b82SMichal Krawczyk tx_info->num_of_bufs = 0; 29473d47e9b1SMichal Krawczyk RTE_ASSERT(tx_info->mbuf == NULL); 294836278b82SMichal Krawczyk 294936278b82SMichal Krawczyk ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 295036278b82SMichal Krawczyk 295136278b82SMichal Krawczyk ena_tx_ctx.ena_bufs = tx_info->bufs; 295236278b82SMichal Krawczyk ena_tx_ctx.push_header = push_header; 295336278b82SMichal Krawczyk ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 295436278b82SMichal Krawczyk ena_tx_ctx.req_id = req_id; 295536278b82SMichal Krawczyk ena_tx_ctx.header_len = header_len; 295636278b82SMichal Krawczyk 295736278b82SMichal Krawczyk /* Set Tx offloads flags, if applicable */ 295836278b82SMichal Krawczyk ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 295936278b82SMichal Krawczyk tx_ring->disable_meta_caching); 296036278b82SMichal Krawczyk 296136278b82SMichal Krawczyk if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 296236278b82SMichal Krawczyk &ena_tx_ctx))) { 29630a001d69SMichal Krawczyk PMD_TX_LOG(DEBUG, 2964617898d1SMichal Krawczyk "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n", 296536278b82SMichal Krawczyk tx_ring->id); 296636278b82SMichal Krawczyk ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 29671f949ad9SAmit Bernstein tx_ring->tx_stats.doorbells++; 29681d973d8fSIgor Chauskin tx_ring->pkts_without_db = false; 296936278b82SMichal Krawczyk } 297036278b82SMichal Krawczyk 297136278b82SMichal Krawczyk /* prepare the packet's descriptors to dma engine */ 297236278b82SMichal Krawczyk rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 297336278b82SMichal Krawczyk &nb_hw_desc); 297436278b82SMichal Krawczyk if (unlikely(rc)) { 2975b57e1053SMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); 297636278b82SMichal Krawczyk ++tx_ring->tx_stats.prepare_ctx_err; 29772bae75eaSDawid Gorecki ena_trigger_reset(tx_ring->adapter, 29782bae75eaSDawid Gorecki ENA_REGS_RESET_DRIVER_INVALID_STATE); 297936278b82SMichal Krawczyk return rc; 298036278b82SMichal Krawczyk } 298136278b82SMichal Krawczyk 298236278b82SMichal Krawczyk tx_info->tx_descs = nb_hw_desc; 2983f93e20e5SMichal Krawczyk tx_info->timestamp = rte_get_timer_cycles(); 298436278b82SMichal Krawczyk 298536278b82SMichal Krawczyk tx_ring->tx_stats.cnt++; 298636278b82SMichal Krawczyk tx_ring->tx_stats.bytes += mbuf->pkt_len; 298736278b82SMichal Krawczyk 298836278b82SMichal Krawczyk tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 298936278b82SMichal Krawczyk tx_ring->size_mask); 299036278b82SMichal Krawczyk 299136278b82SMichal Krawczyk return 0; 299236278b82SMichal Krawczyk } 299336278b82SMichal Krawczyk 2994a52b317eSDawid Gorecki static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) 299536278b82SMichal Krawczyk { 
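/* Reclaim descriptors of already completed Tx packets: free the mbufs,
 * return their req_ids to the free list and acknowledge the completions
 * to the device.
 */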
2996a52b317eSDawid Gorecki struct ena_ring *tx_ring = (struct ena_ring *)txp; 299736278b82SMichal Krawczyk unsigned int total_tx_descs = 0; 2998a52b317eSDawid Gorecki unsigned int total_tx_pkts = 0; 2999005064e5SMichal Krawczyk uint16_t cleanup_budget; 300036278b82SMichal Krawczyk uint16_t next_to_clean = tx_ring->next_to_clean; 300136278b82SMichal Krawczyk 3002a52b317eSDawid Gorecki /* 3003a52b317eSDawid Gorecki * If free_pkt_cnt is equal to 0, it means that the user requested 3004a52b317eSDawid Gorecki * full cleanup, so attempt to release all Tx descriptors 3005a52b317eSDawid Gorecki * (ring_size - 1 -> size_mask) 3006a52b317eSDawid Gorecki */ 3007a52b317eSDawid Gorecki cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt; 300836278b82SMichal Krawczyk 3009a52b317eSDawid Gorecki while (likely(total_tx_pkts < cleanup_budget)) { 301036278b82SMichal Krawczyk struct rte_mbuf *mbuf; 301136278b82SMichal Krawczyk struct ena_tx_buffer *tx_info; 301236278b82SMichal Krawczyk uint16_t req_id; 301336278b82SMichal Krawczyk 301436278b82SMichal Krawczyk if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 301536278b82SMichal Krawczyk break; 301636278b82SMichal Krawczyk 301736278b82SMichal Krawczyk if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 301836278b82SMichal Krawczyk break; 301936278b82SMichal Krawczyk 302036278b82SMichal Krawczyk /* Get Tx info & store how many descs were processed */ 302136278b82SMichal Krawczyk tx_info = &tx_ring->tx_buffer_info[req_id]; 3022f93e20e5SMichal Krawczyk tx_info->timestamp = 0; 302336278b82SMichal Krawczyk 302436278b82SMichal Krawczyk mbuf = tx_info->mbuf; 302536278b82SMichal Krawczyk rte_pktmbuf_free(mbuf); 302636278b82SMichal Krawczyk 302736278b82SMichal Krawczyk tx_info->mbuf = NULL; 302836278b82SMichal Krawczyk tx_ring->empty_tx_reqs[next_to_clean] = req_id; 302936278b82SMichal Krawczyk 303036278b82SMichal Krawczyk total_tx_descs += tx_info->tx_descs; 3031a52b317eSDawid Gorecki total_tx_pkts++; 303236278b82SMichal Krawczyk 303336278b82SMichal Krawczyk /* Put back descriptor to the ring for reuse */ 303436278b82SMichal Krawczyk next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 303536278b82SMichal Krawczyk tx_ring->size_mask); 303636278b82SMichal Krawczyk } 303736278b82SMichal Krawczyk 303836278b82SMichal Krawczyk if (likely(total_tx_descs > 0)) { 303936278b82SMichal Krawczyk /* acknowledge completion of sent packets */ 304036278b82SMichal Krawczyk tx_ring->next_to_clean = next_to_clean; 304136278b82SMichal Krawczyk ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 304236278b82SMichal Krawczyk ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 304336278b82SMichal Krawczyk } 3044f93e20e5SMichal Krawczyk 3045a52b317eSDawid Gorecki /* Notify completion handler that full cleanup was performed */ 3046a52b317eSDawid Gorecki if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) 3047f93e20e5SMichal Krawczyk tx_ring->last_cleanup_ticks = rte_get_timer_cycles(); 3048a52b317eSDawid Gorecki 3049a52b317eSDawid Gorecki return total_tx_pkts; 305036278b82SMichal Krawczyk } 305136278b82SMichal Krawczyk 30521173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 30531173fca2SJan Medala uint16_t nb_pkts) 30541173fca2SJan Medala { 30551173fca2SJan Medala struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 3056005064e5SMichal Krawczyk int available_desc; 305774456796SMichal Krawczyk uint16_t sent_idx = 0; 30581173fca2SJan Medala 30590a001d69SMichal Krawczyk #ifdef 
RTE_ETHDEV_DEBUG_TX 30601173fca2SJan Medala /* Check adapter state */ 30611173fca2SJan Medala if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 30620a001d69SMichal Krawczyk PMD_TX_LOG(ALERT, 30631173fca2SJan Medala "Trying to xmit pkts while device is NOT running\n"); 30641173fca2SJan Medala return 0; 30651173fca2SJan Medala } 30660a001d69SMichal Krawczyk #endif 30671173fca2SJan Medala 306867216c31SMichal Krawczyk available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); 306967216c31SMichal Krawczyk if (available_desc < tx_ring->tx_free_thresh) 3070a52b317eSDawid Gorecki ena_tx_cleanup((void *)tx_ring, 0); 307167216c31SMichal Krawczyk 30721173fca2SJan Medala for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 307336278b82SMichal Krawczyk if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 30742061fe41SRafal Kozik break; 30751d973d8fSIgor Chauskin tx_ring->pkts_without_db = true; 307636278b82SMichal Krawczyk rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 307736278b82SMichal Krawczyk tx_ring->size_mask)]); 30782fca2a98SMichal Krawczyk } 30792fca2a98SMichal Krawczyk 30805e02e19eSJan Medala /* If there are ready packets to be xmitted... */ 30811d973d8fSIgor Chauskin if (likely(tx_ring->pkts_without_db)) { 30825e02e19eSJan Medala /* ...let HW do its best :-) */ 30831173fca2SJan Medala ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 308445b6d861SMichal Krawczyk tx_ring->tx_stats.doorbells++; 30851d973d8fSIgor Chauskin tx_ring->pkts_without_db = false; 30865e02e19eSJan Medala } 30875e02e19eSJan Medala 30887830e905SSolganik Alexander tx_ring->tx_stats.available_desc = 3089b2b02edeSMichal Krawczyk ena_com_free_q_entries(tx_ring->ena_com_io_sq); 30907830e905SSolganik Alexander tx_ring->tx_stats.tx_poll++; 30917830e905SSolganik Alexander 30921173fca2SJan Medala return sent_idx; 30931173fca2SJan Medala } 30941173fca2SJan Medala 3095e3595539SStanislaw Kardach int ena_copy_eni_stats(struct ena_adapter *adapter, struct ena_stats_eni *stats) 309645718adaSMichal Krawczyk { 309745718adaSMichal Krawczyk int rc; 309845718adaSMichal Krawczyk 309945718adaSMichal Krawczyk rte_spinlock_lock(&adapter->admin_lock); 3100e3595539SStanislaw Kardach /* Retrieve and store the latest statistics from the AQ. This ensures 3101e3595539SStanislaw Kardach * that previous value is returned in case of a com error. 3102e3595539SStanislaw Kardach */ 3103e3595539SStanislaw Kardach rc = ENA_PROXY(adapter, ena_com_get_eni_stats, &adapter->ena_dev, 3104e3595539SStanislaw Kardach (struct ena_admin_eni_stats *)stats); 310545718adaSMichal Krawczyk rte_spinlock_unlock(&adapter->admin_lock); 310645718adaSMichal Krawczyk if (rc != 0) { 310745718adaSMichal Krawczyk if (rc == ENA_COM_UNSUPPORTED) { 310845718adaSMichal Krawczyk PMD_DRV_LOG(DEBUG, 3109617898d1SMichal Krawczyk "Retrieving ENI metrics is not supported\n"); 311045718adaSMichal Krawczyk } else { 311145718adaSMichal Krawczyk PMD_DRV_LOG(WARNING, 3112617898d1SMichal Krawczyk "Failed to get ENI metrics, rc: %d\n", rc); 311345718adaSMichal Krawczyk } 311445718adaSMichal Krawczyk return rc; 311545718adaSMichal Krawczyk } 311645718adaSMichal Krawczyk 311745718adaSMichal Krawczyk return 0; 311845718adaSMichal Krawczyk } 311945718adaSMichal Krawczyk 31207830e905SSolganik Alexander /** 31217830e905SSolganik Alexander * DPDK callback to retrieve names of extended device statistics 31227830e905SSolganik Alexander * 31237830e905SSolganik Alexander * @param dev 31247830e905SSolganik Alexander * Pointer to Ethernet device structure. 
31257830e905SSolganik Alexander * @param[out] xstats_names 31267830e905SSolganik Alexander * Buffer to insert names into. 31277830e905SSolganik Alexander * @param n 31287830e905SSolganik Alexander * Number of names. 31297830e905SSolganik Alexander * 31307830e905SSolganik Alexander * @return 31317830e905SSolganik Alexander * Number of xstats names. 31327830e905SSolganik Alexander */ 31337830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev, 31347830e905SSolganik Alexander struct rte_eth_xstat_name *xstats_names, 31357830e905SSolganik Alexander unsigned int n) 31367830e905SSolganik Alexander { 3137aab58857SStanislaw Kardach unsigned int xstats_count = ena_xstats_calc_num(dev->data); 31387830e905SSolganik Alexander unsigned int stat, i, count = 0; 31397830e905SSolganik Alexander 31407830e905SSolganik Alexander if (n < xstats_count || !xstats_names) 31417830e905SSolganik Alexander return xstats_count; 31427830e905SSolganik Alexander 31437830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 31447830e905SSolganik Alexander strcpy(xstats_names[count].name, 31457830e905SSolganik Alexander ena_stats_global_strings[stat].name); 31467830e905SSolganik Alexander 314745718adaSMichal Krawczyk for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) 314845718adaSMichal Krawczyk strcpy(xstats_names[count].name, 314945718adaSMichal Krawczyk ena_stats_eni_strings[stat].name); 315045718adaSMichal Krawczyk 31517830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 31527830e905SSolganik Alexander for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 31537830e905SSolganik Alexander snprintf(xstats_names[count].name, 31547830e905SSolganik Alexander sizeof(xstats_names[count].name), 31557830e905SSolganik Alexander "rx_q%d_%s", i, 31567830e905SSolganik Alexander ena_stats_rx_strings[stat].name); 31577830e905SSolganik Alexander 31587830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 31597830e905SSolganik Alexander for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 31607830e905SSolganik Alexander snprintf(xstats_names[count].name, 31617830e905SSolganik Alexander sizeof(xstats_names[count].name), 31627830e905SSolganik Alexander "tx_q%d_%s", i, 31637830e905SSolganik Alexander ena_stats_tx_strings[stat].name); 31647830e905SSolganik Alexander 31657830e905SSolganik Alexander return xstats_count; 31667830e905SSolganik Alexander } 31677830e905SSolganik Alexander 31687830e905SSolganik Alexander /** 31693cec73faSMichal Krawczyk * DPDK callback to retrieve names of extended device statistics for the given 31703cec73faSMichal Krawczyk * ids. 31713cec73faSMichal Krawczyk * 31723cec73faSMichal Krawczyk * @param dev 31733cec73faSMichal Krawczyk * Pointer to Ethernet device structure. 31743cec73faSMichal Krawczyk * @param[out] xstats_names 31753cec73faSMichal Krawczyk * Buffer to insert names into. 31763cec73faSMichal Krawczyk * @param ids 31773cec73faSMichal Krawczyk * IDs array for which the names should be retrieved. 31783cec73faSMichal Krawczyk * @param size 31793cec73faSMichal Krawczyk * Number of ids. 31803cec73faSMichal Krawczyk * 31813cec73faSMichal Krawczyk * @return 31823cec73faSMichal Krawczyk * Positive value: number of xstats names. Negative value: error code. 
31833cec73faSMichal Krawczyk */
31843cec73faSMichal Krawczyk static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
31853cec73faSMichal Krawczyk const uint64_t *ids,
31863cec73faSMichal Krawczyk struct rte_eth_xstat_name *xstats_names,
31873cec73faSMichal Krawczyk unsigned int size)
31883cec73faSMichal Krawczyk {
31893cec73faSMichal Krawczyk uint64_t xstats_count = ena_xstats_calc_num(dev->data);
31903cec73faSMichal Krawczyk uint64_t id, qid;
31913cec73faSMichal Krawczyk unsigned int i;
31923cec73faSMichal Krawczyk 
31933cec73faSMichal Krawczyk if (xstats_names == NULL)
31943cec73faSMichal Krawczyk return xstats_count;
31953cec73faSMichal Krawczyk 
31963cec73faSMichal Krawczyk for (i = 0; i < size; ++i) {
31973cec73faSMichal Krawczyk id = ids[i];
31983cec73faSMichal Krawczyk if (id >= xstats_count) {
31993cec73faSMichal Krawczyk PMD_DRV_LOG(ERR,
32003cec73faSMichal Krawczyk "ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
32013cec73faSMichal Krawczyk id, xstats_count);
32023cec73faSMichal Krawczyk return -EINVAL;
32033cec73faSMichal Krawczyk }
32043cec73faSMichal Krawczyk 
32053cec73faSMichal Krawczyk if (id < ENA_STATS_ARRAY_GLOBAL) {
32063cec73faSMichal Krawczyk strcpy(xstats_names[i].name,
32073cec73faSMichal Krawczyk ena_stats_global_strings[id].name);
32083cec73faSMichal Krawczyk continue;
32093cec73faSMichal Krawczyk }
32103cec73faSMichal Krawczyk 
32113cec73faSMichal Krawczyk id -= ENA_STATS_ARRAY_GLOBAL;
32123cec73faSMichal Krawczyk if (id < ENA_STATS_ARRAY_ENI) {
32133cec73faSMichal Krawczyk strcpy(xstats_names[i].name,
32143cec73faSMichal Krawczyk ena_stats_eni_strings[id].name);
32153cec73faSMichal Krawczyk continue;
32163cec73faSMichal Krawczyk }
32173cec73faSMichal Krawczyk 
32183cec73faSMichal Krawczyk id -= ENA_STATS_ARRAY_ENI;
32193cec73faSMichal Krawczyk if (id < ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues) {
32203cec73faSMichal Krawczyk qid = id % dev->data->nb_rx_queues;
32213cec73faSMichal Krawczyk id /= dev->data->nb_rx_queues;
32223cec73faSMichal Krawczyk snprintf(xstats_names[i].name,
32233cec73faSMichal Krawczyk sizeof(xstats_names[i].name),
32243cec73faSMichal Krawczyk "rx_q%" PRIu64 "_%s",
32253cec73faSMichal Krawczyk qid, ena_stats_rx_strings[id].name);
32263cec73faSMichal Krawczyk continue;
32273cec73faSMichal Krawczyk }
32283cec73faSMichal Krawczyk 
32293cec73faSMichal Krawczyk id -= ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
32303cec73faSMichal Krawczyk /* Although this condition is not strictly needed, it is kept for
32313cec73faSMichal Krawczyk * compatibility in case a new xstats group is ever added.
32323cec73faSMichal Krawczyk */
32333cec73faSMichal Krawczyk if (id < ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues) {
32343cec73faSMichal Krawczyk qid = id % dev->data->nb_tx_queues;
32353cec73faSMichal Krawczyk id /= dev->data->nb_tx_queues;
32363cec73faSMichal Krawczyk snprintf(xstats_names[i].name,
32373cec73faSMichal Krawczyk sizeof(xstats_names[i].name),
32383cec73faSMichal Krawczyk "tx_q%" PRIu64 "_%s",
32393cec73faSMichal Krawczyk qid, ena_stats_tx_strings[id].name);
32403cec73faSMichal Krawczyk continue;
32413cec73faSMichal Krawczyk }
32423cec73faSMichal Krawczyk }
32433cec73faSMichal Krawczyk 
32443cec73faSMichal Krawczyk return i;
32453cec73faSMichal Krawczyk }
32463cec73faSMichal Krawczyk 
32473cec73faSMichal Krawczyk /**
32487830e905SSolganik Alexander * DPDK callback to get extended device statistics.
32497830e905SSolganik Alexander *
32507830e905SSolganik Alexander * @param dev
32517830e905SSolganik Alexander * Pointer to Ethernet device structure.
32527830e905SSolganik Alexander * @param[out] stats 32537830e905SSolganik Alexander * Stats table output buffer. 32547830e905SSolganik Alexander * @param n 32557830e905SSolganik Alexander * The size of the stats table. 32567830e905SSolganik Alexander * 32577830e905SSolganik Alexander * @return 32587830e905SSolganik Alexander * Number of xstats on success, negative on failure. 32597830e905SSolganik Alexander */ 32607830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev, 32617830e905SSolganik Alexander struct rte_eth_xstat *xstats, 32627830e905SSolganik Alexander unsigned int n) 32637830e905SSolganik Alexander { 3264890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private; 3265aab58857SStanislaw Kardach unsigned int xstats_count = ena_xstats_calc_num(dev->data); 3266e3595539SStanislaw Kardach struct ena_stats_eni eni_stats; 32677830e905SSolganik Alexander unsigned int stat, i, count = 0; 32687830e905SSolganik Alexander int stat_offset; 32697830e905SSolganik Alexander void *stats_begin; 32707830e905SSolganik Alexander 32717830e905SSolganik Alexander if (n < xstats_count) 32727830e905SSolganik Alexander return xstats_count; 32737830e905SSolganik Alexander 32747830e905SSolganik Alexander if (!xstats) 32757830e905SSolganik Alexander return 0; 32767830e905SSolganik Alexander 32777830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 3278493107fdSMichal Krawczyk stat_offset = ena_stats_global_strings[stat].stat_offset; 32797830e905SSolganik Alexander stats_begin = &adapter->dev_stats; 32807830e905SSolganik Alexander 32817830e905SSolganik Alexander xstats[count].id = count; 32827830e905SSolganik Alexander xstats[count].value = *((uint64_t *) 32837830e905SSolganik Alexander ((char *)stats_begin + stat_offset)); 32847830e905SSolganik Alexander } 32857830e905SSolganik Alexander 328645718adaSMichal Krawczyk /* Even if the function below fails, we should copy previous (or initial 328745718adaSMichal Krawczyk * values) to keep structure of rte_eth_xstat consistent. 
328845718adaSMichal Krawczyk */
3289e3595539SStanislaw Kardach ena_copy_eni_stats(adapter, &eni_stats);
329045718adaSMichal Krawczyk for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
329145718adaSMichal Krawczyk stat_offset = ena_stats_eni_strings[stat].stat_offset;
3292e3595539SStanislaw Kardach stats_begin = &eni_stats;
329345718adaSMichal Krawczyk 
329445718adaSMichal Krawczyk xstats[count].id = count;
329545718adaSMichal Krawczyk xstats[count].value = *((uint64_t *)
329645718adaSMichal Krawczyk ((char *)stats_begin + stat_offset));
329745718adaSMichal Krawczyk }
329845718adaSMichal Krawczyk 
32997830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
33007830e905SSolganik Alexander for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
33017830e905SSolganik Alexander stat_offset = ena_stats_rx_strings[stat].stat_offset;
33027830e905SSolganik Alexander stats_begin = &adapter->rx_ring[i].rx_stats;
33037830e905SSolganik Alexander 
33047830e905SSolganik Alexander xstats[count].id = count;
33057830e905SSolganik Alexander xstats[count].value = *((uint64_t *)
33067830e905SSolganik Alexander ((char *)stats_begin + stat_offset));
33077830e905SSolganik Alexander }
33087830e905SSolganik Alexander }
33097830e905SSolganik Alexander 
33107830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
33117830e905SSolganik Alexander for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
33127830e905SSolganik Alexander stat_offset = ena_stats_tx_strings[stat].stat_offset;
33137830e905SSolganik Alexander stats_begin = &adapter->tx_ring[i].tx_stats;
33147830e905SSolganik Alexander 
33157830e905SSolganik Alexander xstats[count].id = count;
33167830e905SSolganik Alexander xstats[count].value = *((uint64_t *)
33177830e905SSolganik Alexander ((char *)stats_begin + stat_offset));
33187830e905SSolganik Alexander }
33197830e905SSolganik Alexander }
33207830e905SSolganik Alexander 
33217830e905SSolganik Alexander return count;
33227830e905SSolganik Alexander }
33237830e905SSolganik Alexander 
33247830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
33257830e905SSolganik Alexander const uint64_t *ids,
33267830e905SSolganik Alexander uint64_t *values,
33277830e905SSolganik Alexander unsigned int n)
33287830e905SSolganik Alexander {
3329890728ffSStephen Hemminger struct ena_adapter *adapter = dev->data->dev_private;
3330e3595539SStanislaw Kardach struct ena_stats_eni eni_stats;
33317830e905SSolganik Alexander uint64_t id;
33327830e905SSolganik Alexander uint64_t rx_entries, tx_entries;
33337830e905SSolganik Alexander unsigned int i;
33347830e905SSolganik Alexander int qid;
33357830e905SSolganik Alexander int valid = 0;
333645718adaSMichal Krawczyk bool was_eni_copied = false;
333745718adaSMichal Krawczyk 
33387830e905SSolganik Alexander for (i = 0; i < n; ++i) {
33397830e905SSolganik Alexander id = ids[i];
33407830e905SSolganik Alexander /* Check if id belongs to global statistics */
33417830e905SSolganik Alexander if (id < ENA_STATS_ARRAY_GLOBAL) {
33427830e905SSolganik Alexander values[i] = *((uint64_t *)&adapter->dev_stats + id);
33437830e905SSolganik Alexander ++valid;
33447830e905SSolganik Alexander continue;
33457830e905SSolganik Alexander }
33467830e905SSolganik Alexander 
334745718adaSMichal Krawczyk /* Check if id belongs to ENI statistics */
33487830e905SSolganik Alexander id -= ENA_STATS_ARRAY_GLOBAL;
334945718adaSMichal Krawczyk if (id < ENA_STATS_ARRAY_ENI) {
335045718adaSMichal Krawczyk /* Avoid reading ENI stats multiple times in a single
335145718adaSMichal Krawczyk * function call, as it requires communication with the
335245718adaSMichal Krawczyk * admin queue.
335345718adaSMichal Krawczyk */
335445718adaSMichal Krawczyk if (!was_eni_copied) {
335545718adaSMichal Krawczyk was_eni_copied = true;
3356e3595539SStanislaw Kardach ena_copy_eni_stats(adapter, &eni_stats);
335745718adaSMichal Krawczyk }
3358e3595539SStanislaw Kardach values[i] = *((uint64_t *)&eni_stats + id);
335945718adaSMichal Krawczyk ++valid;
336045718adaSMichal Krawczyk continue;
336145718adaSMichal Krawczyk }
336245718adaSMichal Krawczyk 
336345718adaSMichal Krawczyk /* Check if id belongs to Rx queue statistics */
336445718adaSMichal Krawczyk id -= ENA_STATS_ARRAY_ENI;
33657830e905SSolganik Alexander rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
33667830e905SSolganik Alexander if (id < rx_entries) {
33677830e905SSolganik Alexander qid = id % dev->data->nb_rx_queues;
33687830e905SSolganik Alexander id /= dev->data->nb_rx_queues;
33697830e905SSolganik Alexander values[i] = *((uint64_t *)
33707830e905SSolganik Alexander &adapter->rx_ring[qid].rx_stats + id);
33717830e905SSolganik Alexander ++valid;
33727830e905SSolganik Alexander continue;
33737830e905SSolganik Alexander }
33747830e905SSolganik Alexander /* Check if id belongs to Tx queue statistics */
33757830e905SSolganik Alexander id -= rx_entries;
33767830e905SSolganik Alexander tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
33777830e905SSolganik Alexander if (id < tx_entries) {
33787830e905SSolganik Alexander qid = id % dev->data->nb_tx_queues;
33797830e905SSolganik Alexander id /= dev->data->nb_tx_queues;
33807830e905SSolganik Alexander values[i] = *((uint64_t *)
33817830e905SSolganik Alexander &adapter->tx_ring[qid].tx_stats + id);
33827830e905SSolganik Alexander ++valid;
33837830e905SSolganik Alexander continue;
33847830e905SSolganik Alexander }
33857830e905SSolganik Alexander }
33867830e905SSolganik Alexander 
33877830e905SSolganik Alexander return valid;
33887830e905SSolganik Alexander }
33897830e905SSolganik Alexander 
3390*cc0c5d25SMichal Krawczyk static int ena_process_uint_devarg(const char *key,
3391*cc0c5d25SMichal Krawczyk const char *value,
3392*cc0c5d25SMichal Krawczyk void *opaque)
3393*cc0c5d25SMichal Krawczyk {
3394*cc0c5d25SMichal Krawczyk struct ena_adapter *adapter = opaque;
3395*cc0c5d25SMichal Krawczyk char *str_end;
3396*cc0c5d25SMichal Krawczyk uint64_t uint_value;
3397*cc0c5d25SMichal Krawczyk 
3398*cc0c5d25SMichal Krawczyk uint_value = strtoull(value, &str_end, 10);
3399*cc0c5d25SMichal Krawczyk if (value == str_end) {
3400*cc0c5d25SMichal Krawczyk PMD_INIT_LOG(ERR,
3401*cc0c5d25SMichal Krawczyk "Invalid value for key '%s'. Only uint values are accepted.\n",
3402*cc0c5d25SMichal Krawczyk key);
3403*cc0c5d25SMichal Krawczyk return -EINVAL;
3404*cc0c5d25SMichal Krawczyk }
3405*cc0c5d25SMichal Krawczyk 
3406*cc0c5d25SMichal Krawczyk if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
3407*cc0c5d25SMichal Krawczyk if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
3408*cc0c5d25SMichal Krawczyk PMD_INIT_LOG(ERR,
3409*cc0c5d25SMichal Krawczyk "Tx timeout too high: %" PRIu64 " sec. 
Maximum allowed: %d sec.\n", 3410*cc0c5d25SMichal Krawczyk uint_value, ENA_MAX_TX_TIMEOUT_SECONDS); 3411*cc0c5d25SMichal Krawczyk return -EINVAL; 3412*cc0c5d25SMichal Krawczyk } else if (uint_value == 0) { 3413*cc0c5d25SMichal Krawczyk PMD_INIT_LOG(INFO, 3414*cc0c5d25SMichal Krawczyk "Check for missing Tx completions has been disabled.\n"); 3415*cc0c5d25SMichal Krawczyk adapter->missing_tx_completion_to = 3416*cc0c5d25SMichal Krawczyk ENA_HW_HINTS_NO_TIMEOUT; 3417*cc0c5d25SMichal Krawczyk } else { 3418*cc0c5d25SMichal Krawczyk PMD_INIT_LOG(INFO, 3419*cc0c5d25SMichal Krawczyk "Tx packet completion timeout set to %" PRIu64 " seconds.\n", 3420*cc0c5d25SMichal Krawczyk uint_value); 3421*cc0c5d25SMichal Krawczyk adapter->missing_tx_completion_to = 3422*cc0c5d25SMichal Krawczyk uint_value * rte_get_timer_hz(); 3423*cc0c5d25SMichal Krawczyk } 3424*cc0c5d25SMichal Krawczyk } 3425*cc0c5d25SMichal Krawczyk 3426*cc0c5d25SMichal Krawczyk return 0; 3427*cc0c5d25SMichal Krawczyk } 3428*cc0c5d25SMichal Krawczyk 34298a7a73f2SMichal Krawczyk static int ena_process_bool_devarg(const char *key, 34308a7a73f2SMichal Krawczyk const char *value, 34318a7a73f2SMichal Krawczyk void *opaque) 34328a7a73f2SMichal Krawczyk { 34338a7a73f2SMichal Krawczyk struct ena_adapter *adapter = opaque; 34348a7a73f2SMichal Krawczyk bool bool_value; 34358a7a73f2SMichal Krawczyk 34368a7a73f2SMichal Krawczyk /* Parse the value. */ 34378a7a73f2SMichal Krawczyk if (strcmp(value, "1") == 0) { 34388a7a73f2SMichal Krawczyk bool_value = true; 34398a7a73f2SMichal Krawczyk } else if (strcmp(value, "0") == 0) { 34408a7a73f2SMichal Krawczyk bool_value = false; 34418a7a73f2SMichal Krawczyk } else { 34428a7a73f2SMichal Krawczyk PMD_INIT_LOG(ERR, 34438a7a73f2SMichal Krawczyk "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n", 34448a7a73f2SMichal Krawczyk value, key); 34458a7a73f2SMichal Krawczyk return -EINVAL; 34468a7a73f2SMichal Krawczyk } 34478a7a73f2SMichal Krawczyk 34488a7a73f2SMichal Krawczyk /* Now, assign it to the proper adapter field. 
*/ 34499b312ad3SIgor Chauskin if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0) 34508a7a73f2SMichal Krawczyk adapter->use_large_llq_hdr = bool_value; 34518a7a73f2SMichal Krawczyk 34528a7a73f2SMichal Krawczyk return 0; 34538a7a73f2SMichal Krawczyk } 34548a7a73f2SMichal Krawczyk 34558a7a73f2SMichal Krawczyk static int ena_parse_devargs(struct ena_adapter *adapter, 34568a7a73f2SMichal Krawczyk struct rte_devargs *devargs) 34578a7a73f2SMichal Krawczyk { 34588a7a73f2SMichal Krawczyk static const char * const allowed_args[] = { 34598a7a73f2SMichal Krawczyk ENA_DEVARG_LARGE_LLQ_HDR, 3460*cc0c5d25SMichal Krawczyk ENA_DEVARG_MISS_TXC_TO, 34619f220a95SMichal Krawczyk NULL, 34628a7a73f2SMichal Krawczyk }; 34638a7a73f2SMichal Krawczyk struct rte_kvargs *kvlist; 34648a7a73f2SMichal Krawczyk int rc; 34658a7a73f2SMichal Krawczyk 34668a7a73f2SMichal Krawczyk if (devargs == NULL) 34678a7a73f2SMichal Krawczyk return 0; 34688a7a73f2SMichal Krawczyk 34698a7a73f2SMichal Krawczyk kvlist = rte_kvargs_parse(devargs->args, allowed_args); 34708a7a73f2SMichal Krawczyk if (kvlist == NULL) { 34718a7a73f2SMichal Krawczyk PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", 34728a7a73f2SMichal Krawczyk devargs->args); 34738a7a73f2SMichal Krawczyk return -EINVAL; 34748a7a73f2SMichal Krawczyk } 34758a7a73f2SMichal Krawczyk 34768a7a73f2SMichal Krawczyk rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, 34778a7a73f2SMichal Krawczyk ena_process_bool_devarg, adapter); 3478*cc0c5d25SMichal Krawczyk if (rc != 0) 3479*cc0c5d25SMichal Krawczyk goto exit; 3480*cc0c5d25SMichal Krawczyk rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO, 3481*cc0c5d25SMichal Krawczyk ena_process_uint_devarg, adapter); 34828a7a73f2SMichal Krawczyk 3483*cc0c5d25SMichal Krawczyk exit: 34848a7a73f2SMichal Krawczyk rte_kvargs_free(kvlist); 34858a7a73f2SMichal Krawczyk 34868a7a73f2SMichal Krawczyk return rc; 34878a7a73f2SMichal Krawczyk } 34888a7a73f2SMichal Krawczyk 34896986cdc4SMichal Krawczyk static int ena_setup_rx_intr(struct rte_eth_dev *dev) 34906986cdc4SMichal Krawczyk { 34916986cdc4SMichal Krawczyk struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3492d61138d4SHarman Kalra struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 34936986cdc4SMichal Krawczyk int rc; 34946986cdc4SMichal Krawczyk uint16_t vectors_nb, i; 34956986cdc4SMichal Krawczyk bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq; 34966986cdc4SMichal Krawczyk 34976986cdc4SMichal Krawczyk if (!rx_intr_requested) 34986986cdc4SMichal Krawczyk return 0; 34996986cdc4SMichal Krawczyk 35006986cdc4SMichal Krawczyk if (!rte_intr_cap_multiple(intr_handle)) { 35016986cdc4SMichal Krawczyk PMD_DRV_LOG(ERR, 35026986cdc4SMichal Krawczyk "Rx interrupt requested, but it isn't supported by the PCI driver\n"); 35036986cdc4SMichal Krawczyk return -ENOTSUP; 35046986cdc4SMichal Krawczyk } 35056986cdc4SMichal Krawczyk 35066986cdc4SMichal Krawczyk /* Disable interrupt mapping before the configuration starts. */ 35076986cdc4SMichal Krawczyk rte_intr_disable(intr_handle); 35086986cdc4SMichal Krawczyk 35096986cdc4SMichal Krawczyk /* Verify if there are enough vectors available. 
*/ 35106986cdc4SMichal Krawczyk vectors_nb = dev->data->nb_rx_queues; 35116986cdc4SMichal Krawczyk if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) { 35126986cdc4SMichal Krawczyk PMD_DRV_LOG(ERR, 35136986cdc4SMichal Krawczyk "Too many Rx interrupts requested, maximum number: %d\n", 35146986cdc4SMichal Krawczyk RTE_MAX_RXTX_INTR_VEC_ID); 35156986cdc4SMichal Krawczyk rc = -ENOTSUP; 35166986cdc4SMichal Krawczyk goto enable_intr; 35176986cdc4SMichal Krawczyk } 35186986cdc4SMichal Krawczyk 3519d61138d4SHarman Kalra /* Allocate the vector list */ 3520d61138d4SHarman Kalra if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 3521d61138d4SHarman Kalra dev->data->nb_rx_queues)) { 35226986cdc4SMichal Krawczyk PMD_DRV_LOG(ERR, 35236986cdc4SMichal Krawczyk "Failed to allocate interrupt vector for %d queues\n", 35246986cdc4SMichal Krawczyk dev->data->nb_rx_queues); 35256986cdc4SMichal Krawczyk rc = -ENOMEM; 35266986cdc4SMichal Krawczyk goto enable_intr; 35276986cdc4SMichal Krawczyk } 35286986cdc4SMichal Krawczyk 35296986cdc4SMichal Krawczyk rc = rte_intr_efd_enable(intr_handle, vectors_nb); 35306986cdc4SMichal Krawczyk if (rc != 0) 35316986cdc4SMichal Krawczyk goto free_intr_vec; 35326986cdc4SMichal Krawczyk 35336986cdc4SMichal Krawczyk if (!rte_intr_allow_others(intr_handle)) { 35346986cdc4SMichal Krawczyk PMD_DRV_LOG(ERR, 35356986cdc4SMichal Krawczyk "Not enough interrupts available to use both ENA Admin and Rx interrupts\n"); 35366986cdc4SMichal Krawczyk goto disable_intr_efd; 35376986cdc4SMichal Krawczyk } 35386986cdc4SMichal Krawczyk 35396986cdc4SMichal Krawczyk for (i = 0; i < vectors_nb; ++i) 3540d61138d4SHarman Kalra if (rte_intr_vec_list_index_set(intr_handle, i, 3541d61138d4SHarman Kalra RTE_INTR_VEC_RXTX_OFFSET + i)) 3542d61138d4SHarman Kalra goto disable_intr_efd; 35436986cdc4SMichal Krawczyk 35446986cdc4SMichal Krawczyk rte_intr_enable(intr_handle); 35456986cdc4SMichal Krawczyk return 0; 35466986cdc4SMichal Krawczyk 35476986cdc4SMichal Krawczyk disable_intr_efd: 35486986cdc4SMichal Krawczyk rte_intr_efd_disable(intr_handle); 35496986cdc4SMichal Krawczyk free_intr_vec: 3550d61138d4SHarman Kalra rte_intr_vec_list_free(intr_handle); 35516986cdc4SMichal Krawczyk enable_intr: 35526986cdc4SMichal Krawczyk rte_intr_enable(intr_handle); 35536986cdc4SMichal Krawczyk return rc; 35546986cdc4SMichal Krawczyk } 35556986cdc4SMichal Krawczyk 35566986cdc4SMichal Krawczyk static void ena_rx_queue_intr_set(struct rte_eth_dev *dev, 35576986cdc4SMichal Krawczyk uint16_t queue_id, 35586986cdc4SMichal Krawczyk bool unmask) 35596986cdc4SMichal Krawczyk { 35606986cdc4SMichal Krawczyk struct ena_adapter *adapter = dev->data->dev_private; 35616986cdc4SMichal Krawczyk struct ena_ring *rxq = &adapter->rx_ring[queue_id]; 35626986cdc4SMichal Krawczyk struct ena_eth_io_intr_reg intr_reg; 35636986cdc4SMichal Krawczyk 35646986cdc4SMichal Krawczyk ena_com_update_intr_reg(&intr_reg, 0, 0, unmask); 35656986cdc4SMichal Krawczyk ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg); 35666986cdc4SMichal Krawczyk } 35676986cdc4SMichal Krawczyk 35686986cdc4SMichal Krawczyk static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 35696986cdc4SMichal Krawczyk uint16_t queue_id) 35706986cdc4SMichal Krawczyk { 35716986cdc4SMichal Krawczyk ena_rx_queue_intr_set(dev, queue_id, true); 35726986cdc4SMichal Krawczyk 35736986cdc4SMichal Krawczyk return 0; 35746986cdc4SMichal Krawczyk } 35756986cdc4SMichal Krawczyk 35766986cdc4SMichal Krawczyk static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 35776986cdc4SMichal Krawczyk uint16_t 
queue_id) 35786986cdc4SMichal Krawczyk { 35796986cdc4SMichal Krawczyk ena_rx_queue_intr_set(dev, queue_id, false); 35806986cdc4SMichal Krawczyk 35816986cdc4SMichal Krawczyk return 0; 35826986cdc4SMichal Krawczyk } 35836986cdc4SMichal Krawczyk 3584b9b05d6fSMichal Krawczyk static int ena_configure_aenq(struct ena_adapter *adapter) 3585b9b05d6fSMichal Krawczyk { 3586b9b05d6fSMichal Krawczyk uint32_t aenq_groups = adapter->all_aenq_groups; 3587b9b05d6fSMichal Krawczyk int rc; 3588b9b05d6fSMichal Krawczyk 3589b9b05d6fSMichal Krawczyk /* All_aenq_groups holds all AENQ functions supported by the device and 3590b9b05d6fSMichal Krawczyk * the HW, so at first we need to be sure the LSC request is valid. 3591b9b05d6fSMichal Krawczyk */ 3592b9b05d6fSMichal Krawczyk if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) { 3593b9b05d6fSMichal Krawczyk if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) { 3594b9b05d6fSMichal Krawczyk PMD_DRV_LOG(ERR, 3595b9b05d6fSMichal Krawczyk "LSC requested, but it's not supported by the AENQ\n"); 3596b9b05d6fSMichal Krawczyk return -EINVAL; 3597b9b05d6fSMichal Krawczyk } 3598b9b05d6fSMichal Krawczyk } else { 3599b9b05d6fSMichal Krawczyk /* If LSC wasn't enabled by the app, let's enable all supported 3600b9b05d6fSMichal Krawczyk * AENQ procedures except the LSC. 3601b9b05d6fSMichal Krawczyk */ 3602b9b05d6fSMichal Krawczyk aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE); 3603b9b05d6fSMichal Krawczyk } 3604b9b05d6fSMichal Krawczyk 3605b9b05d6fSMichal Krawczyk rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups); 3606b9b05d6fSMichal Krawczyk if (rc != 0) { 3607b9b05d6fSMichal Krawczyk PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc); 3608b9b05d6fSMichal Krawczyk return rc; 3609b9b05d6fSMichal Krawczyk } 3610b9b05d6fSMichal Krawczyk 3611b9b05d6fSMichal Krawczyk adapter->active_aenq_groups = aenq_groups; 3612b9b05d6fSMichal Krawczyk 3613b9b05d6fSMichal Krawczyk return 0; 3614b9b05d6fSMichal Krawczyk } 3615b9b05d6fSMichal Krawczyk 3616e3595539SStanislaw Kardach int ena_mp_indirect_table_set(struct ena_adapter *adapter) 3617e3595539SStanislaw Kardach { 3618e3595539SStanislaw Kardach return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev); 3619e3595539SStanislaw Kardach } 3620e3595539SStanislaw Kardach 3621e3595539SStanislaw Kardach int ena_mp_indirect_table_get(struct ena_adapter *adapter, 3622e3595539SStanislaw Kardach uint32_t *indirect_table) 3623e3595539SStanislaw Kardach { 3624e3595539SStanislaw Kardach return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev, 3625e3595539SStanislaw Kardach indirect_table); 3626e3595539SStanislaw Kardach } 3627e3595539SStanislaw Kardach 3628ca148440SMichal Krawczyk /********************************************************************* 3629850e1bb1SMichal Krawczyk * ena_plat_dpdk.h functions implementations 3630850e1bb1SMichal Krawczyk *********************************************************************/ 3631850e1bb1SMichal Krawczyk 3632850e1bb1SMichal Krawczyk const struct rte_memzone * 3633850e1bb1SMichal Krawczyk ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size, 3634850e1bb1SMichal Krawczyk int socket_id, unsigned int alignment, void **virt_addr, 3635850e1bb1SMichal Krawczyk dma_addr_t *phys_addr) 3636850e1bb1SMichal Krawczyk { 3637850e1bb1SMichal Krawczyk char z_name[RTE_MEMZONE_NAMESIZE]; 3638850e1bb1SMichal Krawczyk struct ena_adapter *adapter = data->dev_private; 3639850e1bb1SMichal Krawczyk const struct rte_memzone *memzone; 3640850e1bb1SMichal Krawczyk int rc; 
3641850e1bb1SMichal Krawczyk 3642850e1bb1SMichal Krawczyk rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "", 3643850e1bb1SMichal Krawczyk data->port_id, adapter->memzone_cnt); 3644850e1bb1SMichal Krawczyk if (rc >= RTE_MEMZONE_NAMESIZE) { 3645850e1bb1SMichal Krawczyk PMD_DRV_LOG(ERR, 3646850e1bb1SMichal Krawczyk "Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n", 3647850e1bb1SMichal Krawczyk data->port_id, adapter->memzone_cnt); 3648850e1bb1SMichal Krawczyk goto error; 3649850e1bb1SMichal Krawczyk } 3650850e1bb1SMichal Krawczyk adapter->memzone_cnt++; 3651850e1bb1SMichal Krawczyk 3652850e1bb1SMichal Krawczyk memzone = rte_memzone_reserve_aligned(z_name, size, socket_id, 3653850e1bb1SMichal Krawczyk RTE_MEMZONE_IOVA_CONTIG, alignment); 3654850e1bb1SMichal Krawczyk if (memzone == NULL) { 3655850e1bb1SMichal Krawczyk PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n", 3656850e1bb1SMichal Krawczyk z_name); 3657850e1bb1SMichal Krawczyk goto error; 3658850e1bb1SMichal Krawczyk } 3659850e1bb1SMichal Krawczyk 3660850e1bb1SMichal Krawczyk memset(memzone->addr, 0, size); 3661850e1bb1SMichal Krawczyk *virt_addr = memzone->addr; 3662850e1bb1SMichal Krawczyk *phys_addr = memzone->iova; 3663850e1bb1SMichal Krawczyk 3664850e1bb1SMichal Krawczyk return memzone; 3665850e1bb1SMichal Krawczyk 3666850e1bb1SMichal Krawczyk error: 3667850e1bb1SMichal Krawczyk *virt_addr = NULL; 3668850e1bb1SMichal Krawczyk *phys_addr = 0; 3669850e1bb1SMichal Krawczyk 3670850e1bb1SMichal Krawczyk return NULL; 3671850e1bb1SMichal Krawczyk } 3672850e1bb1SMichal Krawczyk 3673850e1bb1SMichal Krawczyk 3674850e1bb1SMichal Krawczyk /********************************************************************* 3675ca148440SMichal Krawczyk * PMD configuration 3676ca148440SMichal Krawczyk *********************************************************************/ 3677fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3678fdf91e0fSJan Blunck struct rte_pci_device *pci_dev) 3679fdf91e0fSJan Blunck { 3680fdf91e0fSJan Blunck return rte_eth_dev_pci_generic_probe(pci_dev, 3681fdf91e0fSJan Blunck sizeof(struct ena_adapter), eth_ena_dev_init); 3682fdf91e0fSJan Blunck } 3683fdf91e0fSJan Blunck 3684fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 3685fdf91e0fSJan Blunck { 3686eb0ef49dSMichal Krawczyk return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 3687fdf91e0fSJan Blunck } 3688fdf91e0fSJan Blunck 3689fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = { 36901173fca2SJan Medala .id_table = pci_id_ena_map, 369105e0eee0SRafal Kozik .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 369205e0eee0SRafal Kozik RTE_PCI_DRV_WC_ACTIVATE, 3693fdf91e0fSJan Blunck .probe = eth_ena_pci_probe, 3694fdf91e0fSJan Blunck .remove = eth_ena_pci_remove, 36951173fca2SJan Medala }; 36961173fca2SJan Medala 3697fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 369801f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 369906e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 37008a7a73f2SMichal Krawczyk RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>"); 3701eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE); 3702eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE); 37030a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_RX 37040a001d69SMichal Krawczyk 
RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG); 37056f1c9df9SStephen Hemminger #endif 37060a001d69SMichal Krawczyk #ifdef RTE_ETHDEV_DEBUG_TX 37070a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG); 37086f1c9df9SStephen Hemminger #endif 37090a001d69SMichal Krawczyk RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING); 37103adcba9aSMichal Krawczyk 37113adcba9aSMichal Krawczyk /****************************************************************************** 37123adcba9aSMichal Krawczyk ******************************** AENQ Handlers ******************************* 37133adcba9aSMichal Krawczyk *****************************************************************************/ 3714ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data, 3715ca148440SMichal Krawczyk struct ena_admin_aenq_entry *aenq_e) 3716ca148440SMichal Krawczyk { 3717aab58857SStanislaw Kardach struct rte_eth_dev *eth_dev = adapter_data; 3718aab58857SStanislaw Kardach struct ena_adapter *adapter = eth_dev->data->dev_private; 3719ca148440SMichal Krawczyk struct ena_admin_aenq_link_change_desc *aenq_link_desc; 3720ca148440SMichal Krawczyk uint32_t status; 3721ca148440SMichal Krawczyk 3722ca148440SMichal Krawczyk aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 3723ca148440SMichal Krawczyk 3724ca148440SMichal Krawczyk status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 3725ca148440SMichal Krawczyk adapter->link_status = status; 3726ca148440SMichal Krawczyk 3727ca148440SMichal Krawczyk ena_link_update(eth_dev, 0); 37285723fbedSFerruh Yigit rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 3729ca148440SMichal Krawczyk } 3730ca148440SMichal Krawczyk 3731aab58857SStanislaw Kardach static void ena_notification(void *adapter_data, 3732f01f060cSRafal Kozik struct ena_admin_aenq_entry *aenq_e) 3733f01f060cSRafal Kozik { 3734aab58857SStanislaw Kardach struct rte_eth_dev *eth_dev = adapter_data; 3735aab58857SStanislaw Kardach struct ena_adapter *adapter = eth_dev->data->dev_private; 3736f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints; 3737f01f060cSRafal Kozik 3738f01f060cSRafal Kozik if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 3739617898d1SMichal Krawczyk PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. 
Expected: %x\n",
3740f01f060cSRafal Kozik aenq_e->aenq_common_desc.group,
3741f01f060cSRafal Kozik ENA_ADMIN_NOTIFICATION);
3742f01f060cSRafal Kozik 
3743b19f366cSMichal Krawczyk switch (aenq_e->aenq_common_desc.syndrome) {
3744f01f060cSRafal Kozik case ENA_ADMIN_UPDATE_HINTS:
3745f01f060cSRafal Kozik hints = (struct ena_admin_ena_hw_hints *)
3746f01f060cSRafal Kozik (&aenq_e->inline_data_w4);
3747f01f060cSRafal Kozik ena_update_hints(adapter, hints);
3748f01f060cSRafal Kozik break;
3749f01f060cSRafal Kozik default:
3750617898d1SMichal Krawczyk PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
3751b19f366cSMichal Krawczyk aenq_e->aenq_common_desc.syndrome);
3752f01f060cSRafal Kozik }
3753f01f060cSRafal Kozik }
3754f01f060cSRafal Kozik 
3755d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
3756d9b8b106SMichal Krawczyk __rte_unused struct ena_admin_aenq_entry *aenq_e)
3757d9b8b106SMichal Krawczyk {
3758aab58857SStanislaw Kardach struct rte_eth_dev *eth_dev = adapter_data;
3759aab58857SStanislaw Kardach struct ena_adapter *adapter = eth_dev->data->dev_private;
376094c3e376SRafal Kozik struct ena_admin_aenq_keep_alive_desc *desc;
376194c3e376SRafal Kozik uint64_t rx_drops;
3762e1e73e32SMichal Krawczyk uint64_t tx_drops;
3763d9b8b106SMichal Krawczyk 
3764d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles();
376594c3e376SRafal Kozik 
376694c3e376SRafal Kozik desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
376794c3e376SRafal Kozik rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3768e1e73e32SMichal Krawczyk tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3769e1e73e32SMichal Krawczyk 
3770e1e73e32SMichal Krawczyk adapter->drv_stats->rx_drops = rx_drops;
3771e1e73e32SMichal Krawczyk adapter->dev_stats.tx_drops = tx_drops;
3772d9b8b106SMichal Krawczyk }
3773d9b8b106SMichal Krawczyk 
37743adcba9aSMichal Krawczyk /**
37753adcba9aSMichal Krawczyk * This handler will be called for unknown event groups or unimplemented handlers
37763adcba9aSMichal Krawczyk **/
37773adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
37783adcba9aSMichal Krawczyk __rte_unused struct ena_admin_aenq_entry *aenq_e)
37793adcba9aSMichal Krawczyk {
3780617898d1SMichal Krawczyk PMD_DRV_LOG(ERR,
3781617898d1SMichal Krawczyk "Unknown event was received or event with unimplemented handler\n");
37823adcba9aSMichal Krawczyk }
37833adcba9aSMichal Krawczyk 
3784ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
37853adcba9aSMichal Krawczyk .handlers = {
3786ca148440SMichal Krawczyk [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3787f01f060cSRafal Kozik [ENA_ADMIN_NOTIFICATION] = ena_notification,
3788d9b8b106SMichal Krawczyk [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
37893adcba9aSMichal Krawczyk },
37903adcba9aSMichal Krawczyk .unimplemented_handler = unimplemented_aenq_handler
37913adcba9aSMichal Krawczyk };
3792e3595539SStanislaw Kardach 
3793e3595539SStanislaw Kardach /*********************************************************************
3794e3595539SStanislaw Kardach * Multi-Process communication request handling (in primary)
3795e3595539SStanislaw Kardach *********************************************************************/
3796e3595539SStanislaw Kardach static int
3797e3595539SStanislaw Kardach ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
3798e3595539SStanislaw Kardach {
3799e3595539SStanislaw Kardach const struct ena_mp_body *req =
3800e3595539SStanislaw Kardach (const struct ena_mp_body *)mp_msg->param; 3801e3595539SStanislaw Kardach struct ena_adapter *adapter; 3802e3595539SStanislaw Kardach struct ena_com_dev *ena_dev; 3803e3595539SStanislaw Kardach struct ena_mp_body *rsp; 3804e3595539SStanislaw Kardach struct rte_mp_msg mp_rsp; 3805e3595539SStanislaw Kardach struct rte_eth_dev *dev; 3806e3595539SStanislaw Kardach int res = 0; 3807e3595539SStanislaw Kardach 3808e3595539SStanislaw Kardach rsp = (struct ena_mp_body *)&mp_rsp.param; 3809e3595539SStanislaw Kardach mp_msg_init(&mp_rsp, req->type, req->port_id); 3810e3595539SStanislaw Kardach 3811e3595539SStanislaw Kardach if (!rte_eth_dev_is_valid_port(req->port_id)) { 3812e3595539SStanislaw Kardach rte_errno = ENODEV; 3813e3595539SStanislaw Kardach res = -rte_errno; 3814e3595539SStanislaw Kardach PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n", 3815e3595539SStanislaw Kardach req->port_id, req->type); 3816e3595539SStanislaw Kardach goto end; 3817e3595539SStanislaw Kardach } 3818e3595539SStanislaw Kardach dev = &rte_eth_devices[req->port_id]; 3819e3595539SStanislaw Kardach adapter = dev->data->dev_private; 3820e3595539SStanislaw Kardach ena_dev = &adapter->ena_dev; 3821e3595539SStanislaw Kardach 3822e3595539SStanislaw Kardach switch (req->type) { 3823e3595539SStanislaw Kardach case ENA_MP_DEV_STATS_GET: 3824e3595539SStanislaw Kardach res = ena_com_get_dev_basic_stats(ena_dev, 3825e3595539SStanislaw Kardach &adapter->basic_stats); 3826e3595539SStanislaw Kardach break; 3827e3595539SStanislaw Kardach case ENA_MP_ENI_STATS_GET: 3828e3595539SStanislaw Kardach res = ena_com_get_eni_stats(ena_dev, 3829e3595539SStanislaw Kardach (struct ena_admin_eni_stats *)&adapter->eni_stats); 3830e3595539SStanislaw Kardach break; 3831e3595539SStanislaw Kardach case ENA_MP_MTU_SET: 3832e3595539SStanislaw Kardach res = ena_com_set_dev_mtu(ena_dev, req->args.mtu); 3833e3595539SStanislaw Kardach break; 3834e3595539SStanislaw Kardach case ENA_MP_IND_TBL_GET: 3835e3595539SStanislaw Kardach res = ena_com_indirect_table_get(ena_dev, 3836e3595539SStanislaw Kardach adapter->indirect_table); 3837e3595539SStanislaw Kardach break; 3838e3595539SStanislaw Kardach case ENA_MP_IND_TBL_SET: 3839e3595539SStanislaw Kardach res = ena_com_indirect_table_set(ena_dev); 3840e3595539SStanislaw Kardach break; 3841e3595539SStanislaw Kardach default: 3842e3595539SStanislaw Kardach PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type); 3843e3595539SStanislaw Kardach res = -EINVAL; 3844e3595539SStanislaw Kardach break; 3845e3595539SStanislaw Kardach } 3846e3595539SStanislaw Kardach 3847e3595539SStanislaw Kardach end: 3848e3595539SStanislaw Kardach /* Save processing result in the reply */ 3849e3595539SStanislaw Kardach rsp->result = res; 3850e3595539SStanislaw Kardach /* Return just IPC processing status */ 3851e3595539SStanislaw Kardach return rte_mp_reply(&mp_rsp, peer); 3852e3595539SStanislaw Kardach } 3853
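/*
 * Example: the devargs handled by ena_parse_devargs() are passed per device on
 * the EAL command line (the PCI address below is only a placeholder):
 *
 *     dpdk-testpmd -a 00:05.0,large_llq_hdr=1,miss_txc_to=5 -- -i
 *
 * large_llq_hdr=1 requests the large LLQ header mode, while miss_txc_to=5 sets
 * the missing Tx completion timeout to 5 seconds; miss_txc_to=0 disables the
 * check and values above ENA_MAX_TX_TIMEOUT_SECONDS are rejected by
 * ena_process_uint_devarg().
 */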