/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
#include <rte_net.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	0
#define DRV_MODULE_VER_SUBMINOR	0

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	((q - 1) / 2)

/* While processing submitted and completed descriptors (Rx and Tx path
 * respectively) in a loop, it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence the utilization ratio of 1/8 of the queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size)	(ring_size / 8)
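/*
 * Illustrative values (not used anywhere below): queue pair 3 maps to
 * device qid 6 for Tx and qid 7 for Rx, and ENA_IO_RXQ_IDX_REV(7) recovers
 * 3. Likewise, ENA_RING_DESCS_RATIO(1024) gives a per-iteration budget of
 * 128 descriptors.
 */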

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

#define GET_L4_HDR_LEN(mbuf) \
	((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE 7
#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE 40
#define ETH_GSTRING_LEN 32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC 128

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_MAX_RING_SIZE_RX 8192
#define ENA_MAX_RING_SIZE_TX 1024

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append the counter
 * to the name.
 */
uint32_t ena_alloc_cnt;
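/*
 * A minimal usage sketch for the counter (the driver's real allocation
 * helpers live in its platform headers; the names below are illustrative
 * only):
 *
 *	char mz_name[RTE_MEMZONE_NAMESIZE];
 *	snprintf(mz_name, sizeof(mz_name), "ena_mz_%u", ena_alloc_cnt++);
 *	mz = rte_memzone_reserve(mz_name, size, socket_id, 0);
 */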

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF	0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF	0xEC21

#define ENA_TX_OFFLOAD_MASK	(\
	PKT_TX_L4_MASK |	\
	PKT_TX_IPV6 |		\
	PKT_TX_IPV4 |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

int ena_logtype_init;
int ena_logtype_driver;

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
	{ .device_id = 0 },
};
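/*
 * ENA_TX_OFFLOAD_NOTSUP_MASK is the complement of the supported Tx flags
 * within PKT_TX_OFFLOAD_MASK; the Tx prepare handler (eth_ena_prep_pkts,
 * declared below) rejects mbufs carrying any of these flags. Assuming the
 * standard rte_mbuf definition of PKT_TX_OFFLOAD_MASK, a flag such as
 * PKT_TX_VLAN_PKT lands in the unsupported set.
 */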

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static int ena_queue_start(struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static void ena_infos_get(struct rte_eth_dev *dev,
			  struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure = ena_dev_configure,
	.dev_infos_get = ena_infos_get,
	.rx_queue_setup = ena_rx_queue_setup,
	.tx_queue_setup = ena_tx_queue_setup,
	.dev_start = ena_start,
	.dev_stop = ena_stop,
	.link_update = ena_link_update,
	.stats_get = ena_stats_get,
	.xstats_get_names = ena_xstats_get_names,
	.xstats_get = ena_xstats_get,
	.xstats_get_by_id = ena_xstats_get_by_id,
	.mtu_set = ena_mtu_set,
	.rx_queue_release = ena_rx_queue_release,
	.tx_queue_release = ena_tx_queue_release,
	.dev_close = ena_close,
	.dev_reset = ena_dev_reset,
	.reta_update = ena_rss_reta_update,
	.reta_query = ena_rss_reta_query,
};
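/*
 * These callbacks are reached through the regular ethdev API. An
 * illustrative application-side sketch (standard rte_ethdev calls, not part
 * of this driver) that ends up in ena_rss_reta_update():
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[ENA_RX_RSS_TABLE_SIZE /
 *						   RTE_RETA_GROUP_SIZE];
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	reta_conf[0].mask = 1ULL << 0;
 *	reta_conf[0].reta[0] = 0;
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *				    ENA_RX_RSS_TABLE_SIZE);
 */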

#define NUMA_NO_NODE	SOCKET_ID_ANY

/* Resolve a NUMA node for an IO queue by looking up the socket of the
 * memzone stored at index 'cpu' in the EAL memzone array.
 */
static inline int ena_cpu_to_node(int cpu)
{
	struct rte_config *config = rte_eal_get_configuration();
	struct rte_fbarray *arr = &config->mem_config->memzones;
	const struct rte_memzone *mz;

	if (unlikely(cpu >= RTE_MAX_MEMZONE))
		return NUMA_NO_NODE;

	mz = rte_fbarray_get(arr, cpu);

	return mz->socket_id;
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
		packet_type |= RTE_PTYPE_L3_IPV4;
	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
		packet_type |= RTE_PTYPE_L3_IPV6;

	if (unlikely(ena_rx_ctx->l4_csum_err))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	if (unlikely(ena_rx_ctx->l3_csum_err))
		ol_flags |= PKT_RX_IP_CKSUM_BAD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed
		 * (PKT_TX_L4_MASK is a multi-bit field, so it must be
		 * compared for equality rather than tested bit by bit)
		 */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_UDP_CKSUM) &&
			   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}
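/*
 * Illustrative caller-side setup for the offloads handled above (standard
 * DPDK Tx offload usage, not driver code): for an IPv4/TCP packet with
 * checksum offload the application would set
 *
 *	mbuf->l2_len = sizeof(struct ether_hdr);
 *	mbuf->l3_len = sizeof(struct ipv4_hdr);
 *	mbuf->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *
 * before handing the mbuf to the Tx burst function.
 */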

static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);

	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	rx_ring->adapter->trigger_reset = true;
	++rx_ring->rx_stats.bad_req_id;

	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
	else
		RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
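	/*
	 * The three version components are packed into a single 32-bit word;
	 * e.g. 2.0.0 becomes
	 * (2) | (0 << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
	 * (0 << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT).
	 */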
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
		else
			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
{
	return ENA_STATS_ARRAY_GLOBAL +
		(dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->rte_dev);

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
		else
			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static void ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     adapter);

	/*
	 * MAC is not allocated dynamically. Setting it to NULL prevents
	 * rte_eth_dev_release_port() from trying to free it.
	 */
	dev->data->mac_addrs = NULL;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device");

	return rc;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		RTE_LOG(WARNING, PMD,
			"indirection table %d is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* Each reta_conf covers 64 entries; to support 128 entries,
		 * two groups of 64 are used.
		 */
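		/*
		 * Worked example: for i = 70, conf_idx = 70 / 64 = 1 and
		 * idx = 70 % 64 = 6, i.e. bit 6 of reta_conf[1].mask gates
		 * entry 70 of the indirection table.
		 */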
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       entry_value);
			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
				RTE_LOG(ERR, PMD,
					"Cannot fill indirect table\n");
				return rc;
			}
		}
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		return rc;
	}

	RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		RTE_LOG(ERR, PMD, "Cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}
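/*
 * The default RSS setup below spreads the indirection table round-robin
 * across the configured Rx queues: entry i points at queue
 * (i % nb_rx_queues), so with 4 Rx queues the 128-entry table repeats the
 * pattern 0, 1, 2, 3.
 */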
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
			RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i)
		if (ring->rx_buffer_info[i]) {
			rte_mbuf_raw_free(ring->rx_buffer_info[i]);
			ring->rx_buffer_info[i] = NULL;
		}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter;

	adapter = (struct ena_adapter *)(dev->data->dev_private);

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_start(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "failed to start queue %d type(%d)",
					     i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);
" 850498c687aSRafal Kozik "max mtu: %d, min mtu: %d", 851241da076SRafal Kozik max_frame_len, adapter->max_mtu, ENA_MIN_MTU); 852241da076SRafal Kozik return ENA_COM_UNSUPPORTED; 8531173fca2SJan Medala } 8541173fca2SJan Medala 8551173fca2SJan Medala return 0; 8561173fca2SJan Medala } 8571173fca2SJan Medala 8581173fca2SJan Medala static int 859ea93d37eSRafal Kozik ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx) 8601173fca2SJan Medala { 8612fca2a98SMichal Krawczyk struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 8622fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev = ctx->ena_dev; 8632fca2a98SMichal Krawczyk uint32_t tx_queue_size = ENA_MAX_RING_SIZE_TX; 8642fca2a98SMichal Krawczyk uint32_t rx_queue_size = ENA_MAX_RING_SIZE_RX; 8651173fca2SJan Medala 8662fca2a98SMichal Krawczyk if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 867ea93d37eSRafal Kozik struct ena_admin_queue_ext_feature_fields *max_queue_ext = 868ea93d37eSRafal Kozik &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 8692fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 8702fca2a98SMichal Krawczyk max_queue_ext->max_rx_cq_depth); 8712fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 872ea93d37eSRafal Kozik max_queue_ext->max_rx_sq_depth); 8732fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 8742fca2a98SMichal Krawczyk max_queue_ext->max_tx_cq_depth); 8752fca2a98SMichal Krawczyk 8762fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == 8772fca2a98SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV) { 8782fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 8792fca2a98SMichal Krawczyk llq->max_llq_depth); 8802fca2a98SMichal Krawczyk } else { 8812fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 882ea93d37eSRafal Kozik max_queue_ext->max_tx_sq_depth); 8832fca2a98SMichal Krawczyk } 8842fca2a98SMichal Krawczyk 885ea93d37eSRafal Kozik ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 886ea93d37eSRafal Kozik max_queue_ext->max_per_packet_rx_descs); 887ea93d37eSRafal Kozik ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 888ea93d37eSRafal Kozik max_queue_ext->max_per_packet_tx_descs); 889ea93d37eSRafal Kozik } else { 890ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 891ea93d37eSRafal Kozik &ctx->get_feat_ctx->max_queues; 8922fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 8932fca2a98SMichal Krawczyk max_queues->max_cq_depth); 8942fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 895ea93d37eSRafal Kozik max_queues->max_sq_depth); 8962fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 8972fca2a98SMichal Krawczyk max_queues->max_cq_depth); 8982fca2a98SMichal Krawczyk 8992fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == 9002fca2a98SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV) { 9012fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 9022fca2a98SMichal Krawczyk llq->max_llq_depth); 9032fca2a98SMichal Krawczyk } else { 9042fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 9052fca2a98SMichal Krawczyk max_queues->max_sq_depth); 9062fca2a98SMichal Krawczyk } 9072fca2a98SMichal Krawczyk 908ea93d37eSRafal Kozik ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 909ea93d37eSRafal Kozik max_queues->max_packet_tx_descs); 910ea93d37eSRafal Kozik ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 911ea93d37eSRafal Kozik max_queues->max_packet_rx_descs); 912ea93d37eSRafal Kozik } 9131173fca2SJan Medala 914ea93d37eSRafal Kozik /* Round down to the nearest 
	/* Round down to the nearest power of 2 */
	rx_queue_size = rte_align32prevpow2(rx_queue_size);
	tx_queue_size = rte_align32prevpow2(tx_queue_size);

	if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	ctx->rx_queue_size = rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;

	return 0;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	rte_atomic64_init(&adapter->drv_stats->rx_drops);
}

static int ena_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA\n");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);

	/* Driver related stats */
	stats->imissed = rte_atomic64_read(&adapter->drv_stats->rx_drops);
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
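	/*
	 * Per-queue counters are capped at RTE_ETHDEV_QUEUE_STAT_CNTRS
	 * (16 in the default config); queues above that limit still feed the
	 * aggregate counters above but get no q_* entry of their own.
	 */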

	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;

		stats->q_ibytes[i] = rx_stats->bytes;
		stats->q_ipackets[i] = rx_stats->cnt;
		stats->q_errors[i] = rx_stats->bad_desc_num +
			rx_stats->bad_req_id;
	}

	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;

		stats->q_obytes[i] = tx_stats->bytes;
		stats->q_opackets[i] = tx_stats->cnt;
	}

	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
	adapter = (struct ena_adapter *)(dev->data->dev_private);

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
new_mtu: %d " 1016241da076SRafal Kozik "max mtu: %d min mtu: %d\n", 1017241da076SRafal Kozik mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); 1018241da076SRafal Kozik return -EINVAL; 10191173fca2SJan Medala } 10201173fca2SJan Medala 10211173fca2SJan Medala rc = ena_com_set_dev_mtu(ena_dev, mtu); 10221173fca2SJan Medala if (rc) 10231173fca2SJan Medala RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu); 10241173fca2SJan Medala else 10251173fca2SJan Medala RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); 10261173fca2SJan Medala 10271173fca2SJan Medala return rc; 10281173fca2SJan Medala } 10291173fca2SJan Medala 10301173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev) 10311173fca2SJan Medala { 10321173fca2SJan Medala struct ena_adapter *adapter = 10331173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 1034d9b8b106SMichal Krawczyk uint64_t ticks; 10351173fca2SJan Medala int rc = 0; 10361173fca2SJan Medala 10371173fca2SJan Medala rc = ena_check_valid_conf(adapter); 10381173fca2SJan Medala if (rc) 10391173fca2SJan Medala return rc; 10401173fca2SJan Medala 104126e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 10421173fca2SJan Medala if (rc) 10431173fca2SJan Medala return rc; 10441173fca2SJan Medala 104526e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 10461173fca2SJan Medala if (rc) 104726e5543dSRafal Kozik goto err_start_tx; 10481173fca2SJan Medala 10491173fca2SJan Medala if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 1050361913adSDaria Kolistratova ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { 10511173fca2SJan Medala rc = ena_rss_init_default(adapter); 10521173fca2SJan Medala if (rc) 105326e5543dSRafal Kozik goto err_rss_init; 10541173fca2SJan Medala } 10551173fca2SJan Medala 10561173fca2SJan Medala ena_stats_restart(dev); 10571173fca2SJan Medala 1058d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles(); 1059d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1060d9b8b106SMichal Krawczyk 1061d9b8b106SMichal Krawczyk ticks = rte_get_timer_hz(); 1062d9b8b106SMichal Krawczyk rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1063d9b8b106SMichal Krawczyk ena_timer_wd_callback, adapter); 1064d9b8b106SMichal Krawczyk 10657830e905SSolganik Alexander ++adapter->dev_stats.dev_start; 10661173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_RUNNING; 10671173fca2SJan Medala 10681173fca2SJan Medala return 0; 106926e5543dSRafal Kozik 107026e5543dSRafal Kozik err_rss_init: 107126e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 107226e5543dSRafal Kozik err_start_tx: 107326e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 107426e5543dSRafal Kozik return rc; 10751173fca2SJan Medala } 10761173fca2SJan Medala 1077eb0ef49dSMichal Krawczyk static void ena_stop(struct rte_eth_dev *dev) 1078eb0ef49dSMichal Krawczyk { 1079eb0ef49dSMichal Krawczyk struct ena_adapter *adapter = 1080eb0ef49dSMichal Krawczyk (struct ena_adapter *)(dev->data->dev_private); 1081e457bc70SRafal Kozik struct ena_com_dev *ena_dev = &adapter->ena_dev; 1082e457bc70SRafal Kozik int rc; 1083eb0ef49dSMichal Krawczyk 1084d9b8b106SMichal Krawczyk rte_timer_stop_sync(&adapter->timer_wd); 108526e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 108626e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1087d9b8b106SMichal Krawczyk 1088e457bc70SRafal Kozik if (adapter->trigger_reset) { 1089e457bc70SRafal Kozik rc = ena_com_dev_reset(ena_dev, 

static int ena_create_io_queue(struct ena_ring *ring)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx =
		/* policy set to _HOST just to satisfy icc compiler */
		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
		  0, 0, 0, 0, 0 };
	uint16_t ena_qid;
	unsigned int i;
	int rc;

	adapter = ring->adapter;
	ena_dev = &adapter->ena_dev;

	if (ring->type == ENA_RING_TYPE_TX) {
		ena_qid = ENA_IO_TXQ_IDX(ring->id);
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
		ctx.queue_size = adapter->tx_ring_size;
		for (i = 0; i < ring->ring_size; i++)
			ring->empty_tx_reqs[i] = i;
	} else {
		ena_qid = ENA_IO_RXQ_IDX(ring->id);
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
		ctx.queue_size = adapter->rx_ring_size;
		for (i = 0; i < ring->ring_size; i++)
			ring->empty_rx_reqs[i] = i;
	}
	ctx.qid = ena_qid;
	ctx.msix_vector = -1; /* interrupts not used */
	ctx.numa_node = ena_cpu_to_node(ring->id);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"failed to create io queue #%d (qid:%d) rc: %d\n",
			ring->id, ena_qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &ring->ena_com_io_sq,
				     &ring->ena_com_io_cq);
queue num %d rc: %d\n", 1145df238f84SMichal Krawczyk ring->id, rc); 1146df238f84SMichal Krawczyk ena_com_destroy_io_queue(ena_dev, ena_qid); 1147df238f84SMichal Krawczyk return rc; 1148df238f84SMichal Krawczyk } 1149df238f84SMichal Krawczyk 1150df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) 1151df238f84SMichal Krawczyk ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1152df238f84SMichal Krawczyk 1153df238f84SMichal Krawczyk return 0; 1154df238f84SMichal Krawczyk } 1155df238f84SMichal Krawczyk 115626e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring) 1157df238f84SMichal Krawczyk { 115826e5543dSRafal Kozik struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1159df238f84SMichal Krawczyk 116026e5543dSRafal Kozik if (ring->type == ENA_RING_TYPE_RX) { 116126e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 116226e5543dSRafal Kozik ena_rx_queue_release_bufs(ring); 116326e5543dSRafal Kozik } else { 116426e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 116526e5543dSRafal Kozik ena_tx_queue_release_bufs(ring); 1166df238f84SMichal Krawczyk } 1167df238f84SMichal Krawczyk } 1168df238f84SMichal Krawczyk 116926e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev, 117026e5543dSRafal Kozik enum ena_ring_type ring_type) 117126e5543dSRafal Kozik { 117226e5543dSRafal Kozik struct ena_adapter *adapter = 117326e5543dSRafal Kozik (struct ena_adapter *)(dev->data->dev_private); 117426e5543dSRafal Kozik struct ena_ring *queues = NULL; 117526e5543dSRafal Kozik uint16_t nb_queues, i; 117626e5543dSRafal Kozik 117726e5543dSRafal Kozik if (ring_type == ENA_RING_TYPE_RX) { 117826e5543dSRafal Kozik queues = adapter->rx_ring; 117926e5543dSRafal Kozik nb_queues = dev->data->nb_rx_queues; 118026e5543dSRafal Kozik } else { 118126e5543dSRafal Kozik queues = adapter->tx_ring; 118226e5543dSRafal Kozik nb_queues = dev->data->nb_tx_queues; 118326e5543dSRafal Kozik } 118426e5543dSRafal Kozik 118526e5543dSRafal Kozik for (i = 0; i < nb_queues; ++i) 118626e5543dSRafal Kozik if (queues[i].configured) 118726e5543dSRafal Kozik ena_queue_stop(&queues[i]); 118826e5543dSRafal Kozik } 118926e5543dSRafal Kozik 119026e5543dSRafal Kozik static int ena_queue_start(struct ena_ring *ring) 11911173fca2SJan Medala { 1192a467e8f3SMichal Krawczyk int rc, bufs_num; 11931173fca2SJan Medala 11941173fca2SJan Medala ena_assert_msg(ring->configured == 1, 119526e5543dSRafal Kozik "Trying to start unconfigured queue\n"); 11961173fca2SJan Medala 1197df238f84SMichal Krawczyk rc = ena_create_io_queue(ring); 1198df238f84SMichal Krawczyk if (rc) { 1199498c687aSRafal Kozik PMD_INIT_LOG(ERR, "Failed to create IO queue!"); 1200df238f84SMichal Krawczyk return rc; 1201df238f84SMichal Krawczyk } 1202df238f84SMichal Krawczyk 12031173fca2SJan Medala ring->next_to_clean = 0; 12041173fca2SJan Medala ring->next_to_use = 0; 12051173fca2SJan Medala 12067830e905SSolganik Alexander if (ring->type == ENA_RING_TYPE_TX) { 12077830e905SSolganik Alexander ring->tx_stats.available_desc = 12087830e905SSolganik Alexander ena_com_free_desc(ring->ena_com_io_sq); 12091173fca2SJan Medala return 0; 12107830e905SSolganik Alexander } 12111173fca2SJan Medala 1212a467e8f3SMichal Krawczyk bufs_num = ring->ring_size - 1; 1213a467e8f3SMichal Krawczyk rc = ena_populate_rx_queue(ring, bufs_num); 1214a467e8f3SMichal Krawczyk if (rc != bufs_num) { 121526e5543dSRafal Kozik ena_com_destroy_io_queue(&ring->adapter->ena_dev, 121626e5543dSRafal Kozik ENA_IO_RXQ_IDX(ring->id)); 
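/* A partial refill at queue start is treated as fatal: the IO queue
 * created above was just destroyed, so the device is not left with a
 * half-populated RX ring, and ENA_COM_FAULT is reported to the caller.
 */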
1217f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "Failed to populate rx ring!"); 1218241da076SRafal Kozik return ENA_COM_FAULT; 12191173fca2SJan Medala } 12201173fca2SJan Medala 12211173fca2SJan Medala return 0; 12221173fca2SJan Medala } 12231173fca2SJan Medala 12241173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, 12251173fca2SJan Medala uint16_t queue_idx, 12261173fca2SJan Medala uint16_t nb_desc, 12271173fca2SJan Medala __rte_unused unsigned int socket_id, 122856b8b9b7SRafal Kozik const struct rte_eth_txconf *tx_conf) 12291173fca2SJan Medala { 12301173fca2SJan Medala struct ena_ring *txq = NULL; 12311173fca2SJan Medala struct ena_adapter *adapter = 12321173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 12331173fca2SJan Medala unsigned int i; 12341173fca2SJan Medala 12351173fca2SJan Medala txq = &adapter->tx_ring[queue_idx]; 12361173fca2SJan Medala 12371173fca2SJan Medala if (txq->configured) { 12381173fca2SJan Medala RTE_LOG(CRIT, PMD, 12391173fca2SJan Medala "API violation. Queue %d is already configured\n", 12401173fca2SJan Medala queue_idx); 1241241da076SRafal Kozik return ENA_COM_FAULT; 12421173fca2SJan Medala } 12431173fca2SJan Medala 12441daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 12451daff526SJakub Palider RTE_LOG(ERR, PMD, 1246498c687aSRafal Kozik "Unsupported size of TX queue: %d is not a power of 2.\n", 12471daff526SJakub Palider nb_desc); 12481daff526SJakub Palider return -EINVAL; 12491daff526SJakub Palider } 12501daff526SJakub Palider 12511173fca2SJan Medala if (nb_desc > adapter->tx_ring_size) { 12521173fca2SJan Medala RTE_LOG(ERR, PMD, 12531173fca2SJan Medala "Unsupported size of TX queue (max size: %d)\n", 12541173fca2SJan Medala adapter->tx_ring_size); 12551173fca2SJan Medala return -EINVAL; 12561173fca2SJan Medala } 12571173fca2SJan Medala 1258ea93d37eSRafal Kozik if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE) 1259ea93d37eSRafal Kozik nb_desc = adapter->tx_ring_size; 1260ea93d37eSRafal Kozik 12611173fca2SJan Medala txq->port_id = dev->data->port_id; 12621173fca2SJan Medala txq->next_to_clean = 0; 12631173fca2SJan Medala txq->next_to_use = 0; 12641173fca2SJan Medala txq->ring_size = nb_desc; 12651173fca2SJan Medala 12661173fca2SJan Medala txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 12671173fca2SJan Medala sizeof(struct ena_tx_buffer) * 12681173fca2SJan Medala txq->ring_size, 12691173fca2SJan Medala RTE_CACHE_LINE_SIZE); 12701173fca2SJan Medala if (!txq->tx_buffer_info) { 12711173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); 1272df238f84SMichal Krawczyk return -ENOMEM; 12731173fca2SJan Medala } 12741173fca2SJan Medala 12751173fca2SJan Medala txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 12761173fca2SJan Medala sizeof(u16) * txq->ring_size, 12771173fca2SJan Medala RTE_CACHE_LINE_SIZE); 12781173fca2SJan Medala if (!txq->empty_tx_reqs) { 12791173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); 1280df238f84SMichal Krawczyk rte_free(txq->tx_buffer_info); 1281df238f84SMichal Krawczyk return -ENOMEM; 12821173fca2SJan Medala } 1283241da076SRafal Kozik 12842fca2a98SMichal Krawczyk txq->push_buf_intermediate_buf = 12852fca2a98SMichal Krawczyk rte_zmalloc("txq->push_buf_intermediate_buf", 12862fca2a98SMichal Krawczyk txq->tx_max_header_size, 12872fca2a98SMichal Krawczyk RTE_CACHE_LINE_SIZE); 12882fca2a98SMichal Krawczyk if (!txq->push_buf_intermediate_buf) { 12892fca2a98SMichal Krawczyk RTE_LOG(ERR, PMD, "failed to alloc push buffer for LLQ\n"); 
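/* Roll back the arrays allocated earlier in this function so that a
 * failed TX queue setup does not leak tx_buffer_info or empty_tx_reqs.
 */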
12902fca2a98SMichal Krawczyk rte_free(txq->tx_buffer_info); 12912fca2a98SMichal Krawczyk rte_free(txq->empty_tx_reqs); 12922fca2a98SMichal Krawczyk return -ENOMEM; 12932fca2a98SMichal Krawczyk } 12942fca2a98SMichal Krawczyk 12951173fca2SJan Medala for (i = 0; i < txq->ring_size; i++) 12961173fca2SJan Medala txq->empty_tx_reqs[i] = i; 12971173fca2SJan Medala 12982081d5e2SMichal Krawczyk if (tx_conf != NULL) { 12992081d5e2SMichal Krawczyk txq->offloads = 13002081d5e2SMichal Krawczyk tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 13012081d5e2SMichal Krawczyk } 13021173fca2SJan Medala /* Store pointer to this queue in upper layer */ 13031173fca2SJan Medala txq->configured = 1; 13041173fca2SJan Medala dev->data->tx_queues[queue_idx] = txq; 1305241da076SRafal Kozik 1306241da076SRafal Kozik return 0; 13071173fca2SJan Medala } 13081173fca2SJan Medala 13091173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, 13101173fca2SJan Medala uint16_t queue_idx, 13111173fca2SJan Medala uint16_t nb_desc, 13121173fca2SJan Medala __rte_unused unsigned int socket_id, 1313a4996bd8SWei Dai __rte_unused const struct rte_eth_rxconf *rx_conf, 13141173fca2SJan Medala struct rte_mempool *mp) 13151173fca2SJan Medala { 13161173fca2SJan Medala struct ena_adapter *adapter = 13171173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 13181173fca2SJan Medala struct ena_ring *rxq = NULL; 1319df238f84SMichal Krawczyk int i; 13201173fca2SJan Medala 13211173fca2SJan Medala rxq = &adapter->rx_ring[queue_idx]; 13221173fca2SJan Medala if (rxq->configured) { 13231173fca2SJan Medala RTE_LOG(CRIT, PMD, 13241173fca2SJan Medala "API violation. Queue %d is already configured\n", 13251173fca2SJan Medala queue_idx); 1326241da076SRafal Kozik return ENA_COM_FAULT; 13271173fca2SJan Medala } 13281173fca2SJan Medala 1329ea93d37eSRafal Kozik if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE) 1330ea93d37eSRafal Kozik nb_desc = adapter->rx_ring_size; 1331ea93d37eSRafal Kozik 13321daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 13331daff526SJakub Palider RTE_LOG(ERR, PMD, 1334498c687aSRafal Kozik "Unsupported size of RX queue: %d is not a power of 2.\n", 13351daff526SJakub Palider nb_desc); 13361daff526SJakub Palider return -EINVAL; 13371daff526SJakub Palider } 13381daff526SJakub Palider 13391173fca2SJan Medala if (nb_desc > adapter->rx_ring_size) { 13401173fca2SJan Medala RTE_LOG(ERR, PMD, 13411173fca2SJan Medala "Unsupported size of RX queue (max size: %d)\n", 13421173fca2SJan Medala adapter->rx_ring_size); 13431173fca2SJan Medala return -EINVAL; 13441173fca2SJan Medala } 13451173fca2SJan Medala 13461173fca2SJan Medala rxq->port_id = dev->data->port_id; 13471173fca2SJan Medala rxq->next_to_clean = 0; 13481173fca2SJan Medala rxq->next_to_use = 0; 13491173fca2SJan Medala rxq->ring_size = nb_desc; 13501173fca2SJan Medala rxq->mb_pool = mp; 13511173fca2SJan Medala 13521173fca2SJan Medala rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", 13531173fca2SJan Medala sizeof(struct rte_mbuf *) * nb_desc, 13541173fca2SJan Medala RTE_CACHE_LINE_SIZE); 13551173fca2SJan Medala if (!rxq->rx_buffer_info) { 13561173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); 13571173fca2SJan Medala return -ENOMEM; 13581173fca2SJan Medala } 13591173fca2SJan Medala 136079405ee1SRafal Kozik rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer", 136179405ee1SRafal Kozik sizeof(struct rte_mbuf *) * nb_desc, 136279405ee1SRafal Kozik RTE_CACHE_LINE_SIZE); 136379405ee1SRafal Kozik 
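/* rx_refill_buffer is a staging array for ena_populate_rx_queue(): mbufs
 * are fetched from the mempool in a single rte_mempool_get_bulk() call
 * and then posted to the device one descriptor at a time. Ring slots are
 * derived from free-running 16-bit counters masked with (ring_size - 1),
 * which is why nb_desc must be a power of 2: e.g. with ring_size = 8,
 * next_to_use = 65535 maps to slot 65535 & 7 = 7, and the unsigned
 * wrap-around keeps in_use = next_to_use - next_to_clean correct.
 */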
136479405ee1SRafal Kozik if (!rxq->rx_refill_buffer) { 136579405ee1SRafal Kozik RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n"); 136679405ee1SRafal Kozik rte_free(rxq->rx_buffer_info); 136779405ee1SRafal Kozik rxq->rx_buffer_info = NULL; 136879405ee1SRafal Kozik return -ENOMEM; 136979405ee1SRafal Kozik } 137079405ee1SRafal Kozik 1371c2034976SMichal Krawczyk rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", 1372c2034976SMichal Krawczyk sizeof(uint16_t) * nb_desc, 1373c2034976SMichal Krawczyk RTE_CACHE_LINE_SIZE); 1374c2034976SMichal Krawczyk if (!rxq->empty_rx_reqs) { 1375c2034976SMichal Krawczyk RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n"); 1376c2034976SMichal Krawczyk rte_free(rxq->rx_buffer_info); 1377c2034976SMichal Krawczyk rxq->rx_buffer_info = NULL; 137879405ee1SRafal Kozik rte_free(rxq->rx_refill_buffer); 137979405ee1SRafal Kozik rxq->rx_refill_buffer = NULL; 1380c2034976SMichal Krawczyk return -ENOMEM; 1381c2034976SMichal Krawczyk } 1382c2034976SMichal Krawczyk 1383c2034976SMichal Krawczyk for (i = 0; i < nb_desc; i++) 1384eccbe2ffSRafal Kozik rxq->empty_rx_reqs[i] = i; 1385c2034976SMichal Krawczyk 13861173fca2SJan Medala /* Store pointer to this queue in upper layer */ 13871173fca2SJan Medala rxq->configured = 1; 13881173fca2SJan Medala dev->data->rx_queues[queue_idx] = rxq; 13891173fca2SJan Medala 1390df238f84SMichal Krawczyk return 0; 13911173fca2SJan Medala } 13921173fca2SJan Medala 13931173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 13941173fca2SJan Medala { 13951173fca2SJan Medala unsigned int i; 13961173fca2SJan Medala int rc; 13971daff526SJakub Palider uint16_t ring_size = rxq->ring_size; 13981daff526SJakub Palider uint16_t ring_mask = ring_size - 1; 13991daff526SJakub Palider uint16_t next_to_use = rxq->next_to_use; 1400c2034976SMichal Krawczyk uint16_t in_use, req_id; 140179405ee1SRafal Kozik struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 14021173fca2SJan Medala 14031173fca2SJan Medala if (unlikely(!count)) 14041173fca2SJan Medala return 0; 14051173fca2SJan Medala 14061daff526SJakub Palider in_use = rxq->next_to_use - rxq->next_to_clean; 1407498c687aSRafal Kozik ena_assert_msg(((in_use + count) < ring_size), "bad ring state\n"); 14081173fca2SJan Medala 14091173fca2SJan Medala /* get resources for incoming packets */ 141079405ee1SRafal Kozik rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count); 14111173fca2SJan Medala if (unlikely(rc < 0)) { 14121173fca2SJan Medala rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 14137830e905SSolganik Alexander ++rxq->rx_stats.mbuf_alloc_fail; 14141173fca2SJan Medala PMD_RX_LOG(DEBUG, "there are not enough free buffers"); 14151173fca2SJan Medala return 0; 14161173fca2SJan Medala } 14171173fca2SJan Medala 14181173fca2SJan Medala for (i = 0; i < count; i++) { 14191daff526SJakub Palider uint16_t next_to_use_masked = next_to_use & ring_mask; 142079405ee1SRafal Kozik struct rte_mbuf *mbuf = mbufs[i]; 14211173fca2SJan Medala struct ena_com_buf ebuf; 14221173fca2SJan Medala 142379405ee1SRafal Kozik if (likely((i + 4) < count)) 142479405ee1SRafal Kozik rte_prefetch0(mbufs[i + 4]); 1425c2034976SMichal Krawczyk 1426c2034976SMichal Krawczyk req_id = rxq->empty_rx_reqs[next_to_use_masked]; 1427241da076SRafal Kozik rc = validate_rx_req_id(rxq, req_id); 1428241da076SRafal Kozik if (unlikely(rc < 0)) 1429241da076SRafal Kozik break; 143079405ee1SRafal Kozik rxq->rx_buffer_info[req_id] = mbuf; 1431241da076SRafal Kozik 14321173fca2SJan Medala /* prepare 
physical address for DMA transaction */ 1433455da545SSantosh Shukla ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 14341173fca2SJan Medala ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 14351173fca2SJan Medala /* pass resource to device */ 14361173fca2SJan Medala rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, 1437c2034976SMichal Krawczyk &ebuf, req_id); 14381173fca2SJan Medala if (unlikely(rc)) { 14391173fca2SJan Medala RTE_LOG(WARNING, PMD, "failed adding rx desc\n"); 144079405ee1SRafal Kozik rxq->rx_buffer_info[req_id] = NULL; 14411173fca2SJan Medala break; 14421173fca2SJan Medala } 14431daff526SJakub Palider next_to_use++; 14441173fca2SJan Medala } 14451173fca2SJan Medala 144679405ee1SRafal Kozik if (unlikely(i < count)) { 1447241da076SRafal Kozik RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d " 1448241da076SRafal Kozik "buffers (from %d)\n", rxq->id, i, count); 144979405ee1SRafal Kozik rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]), 145079405ee1SRafal Kozik count - i); 14517830e905SSolganik Alexander ++rxq->rx_stats.refill_partial; 145279405ee1SRafal Kozik } 1453241da076SRafal Kozik 14545e02e19eSJan Medala /* When we submitted free resources to device... */ 14553d19e1abSRafal Kozik if (likely(i > 0)) { 1456241da076SRafal Kozik /* ...let HW know that it can fill buffers with data 1457241da076SRafal Kozik * 1458241da076SRafal Kozik * Add memory barrier to make sure the descriptors were written 1459241da076SRafal Kozik * before issuing the doorbell 1460241da076SRafal Kozik */ 14611173fca2SJan Medala rte_wmb(); 14621173fca2SJan Medala ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 14631173fca2SJan Medala 14645e02e19eSJan Medala rxq->next_to_use = next_to_use; 14655e02e19eSJan Medala } 14665e02e19eSJan Medala 14671173fca2SJan Medala return i; 14681173fca2SJan Medala } 14691173fca2SJan Medala 14701173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev, 1471e859d2b8SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx, 1472e859d2b8SRafal Kozik bool *wd_state) 14731173fca2SJan Medala { 1474ca148440SMichal Krawczyk uint32_t aenq_groups; 14751173fca2SJan Medala int rc; 1476c4144557SJan Medala bool readless_supported; 14771173fca2SJan Medala 14781173fca2SJan Medala /* Initialize mmio registers */ 14791173fca2SJan Medala rc = ena_com_mmio_reg_read_request_init(ena_dev); 14801173fca2SJan Medala if (rc) { 14811173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to init mmio read less\n"); 14821173fca2SJan Medala return rc; 14831173fca2SJan Medala } 14841173fca2SJan Medala 1485c4144557SJan Medala /* The PCIe configuration space revision id indicates if mmio reg 1486c4144557SJan Medala * read is disabled. 
1487c4144557SJan Medala */ 1488c4144557SJan Medala readless_supported = 1489c4144557SJan Medala !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id 1490c4144557SJan Medala & ENA_MMIO_DISABLE_REG_READ); 1491c4144557SJan Medala ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1492c4144557SJan Medala 14931173fca2SJan Medala /* reset device */ 14943adcba9aSMichal Krawczyk rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 14951173fca2SJan Medala if (rc) { 14961173fca2SJan Medala RTE_LOG(ERR, PMD, "cannot reset device\n"); 14971173fca2SJan Medala goto err_mmio_read_less; 14981173fca2SJan Medala } 14991173fca2SJan Medala 15001173fca2SJan Medala /* check FW version */ 15011173fca2SJan Medala rc = ena_com_validate_version(ena_dev); 15021173fca2SJan Medala if (rc) { 15031173fca2SJan Medala RTE_LOG(ERR, PMD, "device version is too low\n"); 15041173fca2SJan Medala goto err_mmio_read_less; 15051173fca2SJan Medala } 15061173fca2SJan Medala 15071173fca2SJan Medala ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 15081173fca2SJan Medala 15091173fca2SJan Medala /* ENA device administration layer init */ 1510b68309beSRafal Kozik rc = ena_com_admin_init(ena_dev, &aenq_handlers); 15111173fca2SJan Medala if (rc) { 15121173fca2SJan Medala RTE_LOG(ERR, PMD, 15131173fca2SJan Medala "cannot initialize ena admin queue with device\n"); 15141173fca2SJan Medala goto err_mmio_read_less; 15151173fca2SJan Medala } 15161173fca2SJan Medala 15171173fca2SJan Medala /* To enable the msix interrupts the driver needs to know the number 15181173fca2SJan Medala * of queues. So the driver uses polling mode to retrieve this 15191173fca2SJan Medala * information. 15201173fca2SJan Medala */ 15211173fca2SJan Medala ena_com_set_admin_polling_mode(ena_dev, true); 15221173fca2SJan Medala 1523201ff2e5SJakub Palider ena_config_host_info(ena_dev); 1524201ff2e5SJakub Palider 15251173fca2SJan Medala /* Get Device Attributes and features */ 15261173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 15271173fca2SJan Medala if (rc) { 15281173fca2SJan Medala RTE_LOG(ERR, PMD, 15291173fca2SJan Medala "cannot get attribute for ena device rc= %d\n", rc); 15301173fca2SJan Medala goto err_admin_init; 15311173fca2SJan Medala } 15321173fca2SJan Medala 1533f01f060cSRafal Kozik aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1534d9b8b106SMichal Krawczyk BIT(ENA_ADMIN_NOTIFICATION) | 1535983cce2dSRafal Kozik BIT(ENA_ADMIN_KEEP_ALIVE) | 1536983cce2dSRafal Kozik BIT(ENA_ADMIN_FATAL_ERROR) | 1537983cce2dSRafal Kozik BIT(ENA_ADMIN_WARNING); 1538ca148440SMichal Krawczyk 1539ca148440SMichal Krawczyk aenq_groups &= get_feat_ctx->aenq.supported_groups; 1540ca148440SMichal Krawczyk rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1541ca148440SMichal Krawczyk if (rc) { 1542ca148440SMichal Krawczyk RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc); 1543ca148440SMichal Krawczyk goto err_admin_init; 1544ca148440SMichal Krawczyk } 1545ca148440SMichal Krawczyk 1546e859d2b8SRafal Kozik *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1547e859d2b8SRafal Kozik 15481173fca2SJan Medala return 0; 15491173fca2SJan Medala 15501173fca2SJan Medala err_admin_init: 15511173fca2SJan Medala ena_com_admin_destroy(ena_dev); 15521173fca2SJan Medala 15531173fca2SJan Medala err_mmio_read_less: 15541173fca2SJan Medala ena_com_mmio_reg_read_request_destroy(ena_dev); 15551173fca2SJan Medala 15561173fca2SJan Medala return rc; 15571173fca2SJan Medala } 15581173fca2SJan Medala 1559ca148440SMichal Krawczyk static void 
ena_interrupt_handler_rte(void *cb_arg) 156015773e06SMichal Krawczyk { 156115773e06SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)cb_arg; 156215773e06SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 156315773e06SMichal Krawczyk 156415773e06SMichal Krawczyk ena_com_admin_q_comp_intr_handler(ena_dev); 15653d19e1abSRafal Kozik if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1566ca148440SMichal Krawczyk ena_com_aenq_intr_handler(ena_dev, adapter); 156715773e06SMichal Krawczyk } 156815773e06SMichal Krawczyk 15695efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter) 15705efb9fc7SMichal Krawczyk { 1571e859d2b8SRafal Kozik if (!adapter->wd_state) 1572e859d2b8SRafal Kozik return; 1573e859d2b8SRafal Kozik 15745efb9fc7SMichal Krawczyk if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 15755efb9fc7SMichal Krawczyk return; 15765efb9fc7SMichal Krawczyk 15775efb9fc7SMichal Krawczyk if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 15785efb9fc7SMichal Krawczyk adapter->keep_alive_timeout)) { 15795efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Keep alive timeout\n"); 15805efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 15815efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 15827830e905SSolganik Alexander ++adapter->dev_stats.wd_expired; 15835efb9fc7SMichal Krawczyk } 15845efb9fc7SMichal Krawczyk } 15855efb9fc7SMichal Krawczyk 15865efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */ 15875efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter) 15885efb9fc7SMichal Krawczyk { 15895efb9fc7SMichal Krawczyk if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 15905efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n"); 15915efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 15925efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 15935efb9fc7SMichal Krawczyk } 15945efb9fc7SMichal Krawczyk } 15955efb9fc7SMichal Krawczyk 1596d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1597d9b8b106SMichal Krawczyk void *arg) 1598d9b8b106SMichal Krawczyk { 1599d9b8b106SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)arg; 1600d9b8b106SMichal Krawczyk struct rte_eth_dev *dev = adapter->rte_dev; 1601d9b8b106SMichal Krawczyk 16025efb9fc7SMichal Krawczyk check_for_missing_keep_alive(adapter); 16035efb9fc7SMichal Krawczyk check_for_admin_com_state(adapter); 1604d9b8b106SMichal Krawczyk 16055efb9fc7SMichal Krawczyk if (unlikely(adapter->trigger_reset)) { 16065efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Trigger reset is on\n"); 1607d9b8b106SMichal Krawczyk _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1608d9b8b106SMichal Krawczyk NULL); 1609d9b8b106SMichal Krawczyk } 1610d9b8b106SMichal Krawczyk } 1611d9b8b106SMichal Krawczyk 16122fca2a98SMichal Krawczyk static inline void 16132fca2a98SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config) 16142fca2a98SMichal Krawczyk { 16152fca2a98SMichal Krawczyk llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 16162fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 16172fca2a98SMichal Krawczyk llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 16182fca2a98SMichal Krawczyk llq_config->llq_num_decs_before_header = 16192fca2a98SMichal Krawczyk 
ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 16202fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size_value = 128; 16212fca2a98SMichal Krawczyk } 16222fca2a98SMichal Krawczyk 16232fca2a98SMichal Krawczyk static int 16242fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter, 16252fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev, 16262fca2a98SMichal Krawczyk struct ena_admin_feature_llq_desc *llq, 16272fca2a98SMichal Krawczyk struct ena_llq_configurations *llq_default_configurations) 16282fca2a98SMichal Krawczyk { 16292fca2a98SMichal Krawczyk int rc; 16302fca2a98SMichal Krawczyk u32 llq_feature_mask; 16312fca2a98SMichal Krawczyk 16322fca2a98SMichal Krawczyk llq_feature_mask = 1 << ENA_ADMIN_LLQ; 16332fca2a98SMichal Krawczyk if (!(ena_dev->supported_features & llq_feature_mask)) { 16342fca2a98SMichal Krawczyk RTE_LOG(INFO, PMD, 16352fca2a98SMichal Krawczyk "LLQ is not supported. Fallback to host mode policy.\n"); 16362fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16372fca2a98SMichal Krawczyk return 0; 16382fca2a98SMichal Krawczyk } 16392fca2a98SMichal Krawczyk 16402fca2a98SMichal Krawczyk rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 16412fca2a98SMichal Krawczyk if (unlikely(rc)) { 16422fca2a98SMichal Krawczyk PMD_INIT_LOG(WARNING, "Failed to config dev mode. " 1643498c687aSRafal Kozik "Fallback to host mode policy."); 16442fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16452fca2a98SMichal Krawczyk return 0; 16462fca2a98SMichal Krawczyk } 16472fca2a98SMichal Krawczyk 16482fca2a98SMichal Krawczyk /* Nothing to config, exit */ 16492fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 16502fca2a98SMichal Krawczyk return 0; 16512fca2a98SMichal Krawczyk 16522fca2a98SMichal Krawczyk if (!adapter->dev_mem_base) { 16532fca2a98SMichal Krawczyk RTE_LOG(ERR, PMD, "Unable to access LLQ bar resource. 
" 16542fca2a98SMichal Krawczyk "Fallback to host mode policy.\n."); 16552fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16562fca2a98SMichal Krawczyk return 0; 16572fca2a98SMichal Krawczyk } 16582fca2a98SMichal Krawczyk 16592fca2a98SMichal Krawczyk ena_dev->mem_bar = adapter->dev_mem_base; 16602fca2a98SMichal Krawczyk 16612fca2a98SMichal Krawczyk return 0; 16622fca2a98SMichal Krawczyk } 16632fca2a98SMichal Krawczyk 1664ea93d37eSRafal Kozik static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev, 166501bd6877SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx) 166601bd6877SRafal Kozik { 16672fca2a98SMichal Krawczyk uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; 166801bd6877SRafal Kozik 1669ea93d37eSRafal Kozik /* Regular queues capabilities */ 1670ea93d37eSRafal Kozik if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1671ea93d37eSRafal Kozik struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1672ea93d37eSRafal Kozik &get_feat_ctx->max_queue_ext.max_queue_ext; 16732fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 16742fca2a98SMichal Krawczyk max_queue_ext->max_rx_cq_num); 16752fca2a98SMichal Krawczyk io_tx_sq_num = max_queue_ext->max_tx_sq_num; 16762fca2a98SMichal Krawczyk io_tx_cq_num = max_queue_ext->max_tx_cq_num; 1677ea93d37eSRafal Kozik } else { 1678ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 1679ea93d37eSRafal Kozik &get_feat_ctx->max_queues; 16802fca2a98SMichal Krawczyk io_tx_sq_num = max_queues->max_sq_num; 16812fca2a98SMichal Krawczyk io_tx_cq_num = max_queues->max_cq_num; 16822fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 1683ea93d37eSRafal Kozik } 168401bd6877SRafal Kozik 16852fca2a98SMichal Krawczyk /* In case of LLQ use the llq number in the get feature cmd */ 16862fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 16872fca2a98SMichal Krawczyk io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 16882fca2a98SMichal Krawczyk 168943d9610eSMichal Krawczyk io_queue_num = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 16902fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num); 16912fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num); 169201bd6877SRafal Kozik 169301bd6877SRafal Kozik if (unlikely(io_queue_num == 0)) { 169401bd6877SRafal Kozik RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n"); 169501bd6877SRafal Kozik return -EFAULT; 169601bd6877SRafal Kozik } 169701bd6877SRafal Kozik 169801bd6877SRafal Kozik return io_queue_num; 169901bd6877SRafal Kozik } 170001bd6877SRafal Kozik 17011173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 17021173fca2SJan Medala { 1703ea93d37eSRafal Kozik struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 17041173fca2SJan Medala struct rte_pci_device *pci_dev; 1705eb0ef49dSMichal Krawczyk struct rte_intr_handle *intr_handle; 17061173fca2SJan Medala struct ena_adapter *adapter = 17071173fca2SJan Medala (struct ena_adapter *)(eth_dev->data->dev_private); 17081173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 17091173fca2SJan Medala struct ena_com_dev_get_features_ctx get_feat_ctx; 17102fca2a98SMichal Krawczyk struct ena_llq_configurations llq_config; 17112fca2a98SMichal Krawczyk const char *queue_type_str; 1712ea93d37eSRafal Kozik int rc; 17131173fca2SJan Medala 17141173fca2SJan Medala static int adapters_found; 1715e859d2b8SRafal Kozik bool wd_state; 
17161173fca2SJan Medala 17171173fca2SJan Medala eth_dev->dev_ops = &ena_dev_ops; 17181173fca2SJan Medala eth_dev->rx_pkt_burst = &eth_ena_recv_pkts; 17191173fca2SJan Medala eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts; 1720b3fc5a1aSKonstantin Ananyev eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts; 17211173fca2SJan Medala 17221173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 17231173fca2SJan Medala return 0; 17241173fca2SJan Medala 1725fd976890SMichal Krawczyk memset(adapter, 0, sizeof(struct ena_adapter)); 1726fd976890SMichal Krawczyk ena_dev = &adapter->ena_dev; 1727fd976890SMichal Krawczyk 1728fd976890SMichal Krawczyk adapter->rte_eth_dev_data = eth_dev->data; 1729fd976890SMichal Krawczyk adapter->rte_dev = eth_dev; 1730fd976890SMichal Krawczyk 1731c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 17321173fca2SJan Medala adapter->pdev = pci_dev; 17331173fca2SJan Medala 1734f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 17351173fca2SJan Medala pci_dev->addr.domain, 17361173fca2SJan Medala pci_dev->addr.bus, 17371173fca2SJan Medala pci_dev->addr.devid, 17381173fca2SJan Medala pci_dev->addr.function); 17391173fca2SJan Medala 1740eb0ef49dSMichal Krawczyk intr_handle = &pci_dev->intr_handle; 1741eb0ef49dSMichal Krawczyk 17421173fca2SJan Medala adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 17431173fca2SJan Medala adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 17441173fca2SJan Medala 17451d339597SRafal Kozik if (!adapter->regs) { 1746f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 17471173fca2SJan Medala ENA_REGS_BAR); 17481d339597SRafal Kozik return -ENXIO; 17491d339597SRafal Kozik } 17501173fca2SJan Medala 17511173fca2SJan Medala ena_dev->reg_bar = adapter->regs; 17521173fca2SJan Medala ena_dev->dmadev = adapter->pdev; 17531173fca2SJan Medala 17541173fca2SJan Medala adapter->id_number = adapters_found; 17551173fca2SJan Medala 17561173fca2SJan Medala snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 17571173fca2SJan Medala adapter->id_number); 17581173fca2SJan Medala 17591173fca2SJan Medala /* device specific initialization routine */ 1760e859d2b8SRafal Kozik rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 17611173fca2SJan Medala if (rc) { 1762f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1763241da076SRafal Kozik goto err; 17641173fca2SJan Medala } 1765e859d2b8SRafal Kozik adapter->wd_state = wd_state; 17661173fca2SJan Medala 17672fca2a98SMichal Krawczyk set_default_llq_configurations(&llq_config); 17682fca2a98SMichal Krawczyk rc = ena_set_queues_placement_policy(adapter, ena_dev, 17692fca2a98SMichal Krawczyk &get_feat_ctx.llq, &llq_config); 17702fca2a98SMichal Krawczyk if (unlikely(rc)) { 17712fca2a98SMichal Krawczyk PMD_INIT_LOG(CRIT, "Failed to set placement policy"); 17722fca2a98SMichal Krawczyk return rc; 17732fca2a98SMichal Krawczyk } 17742fca2a98SMichal Krawczyk 17752fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 17762fca2a98SMichal Krawczyk queue_type_str = "Regular"; 17772fca2a98SMichal Krawczyk else 17782fca2a98SMichal Krawczyk queue_type_str = "Low latency"; 17792fca2a98SMichal Krawczyk RTE_LOG(INFO, PMD, "Placement policy: %s\n", queue_type_str); 1780ea93d37eSRafal Kozik 1781ea93d37eSRafal Kozik calc_queue_ctx.ena_dev = ena_dev; 1782ea93d37eSRafal Kozik calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 178301bd6877SRafal Kozik adapter->num_queues = ena_calc_io_queue_num(ena_dev, 178401bd6877SRafal Kozik 
&get_feat_ctx); 17851173fca2SJan Medala 1786ea93d37eSRafal Kozik rc = ena_calc_queue_size(&calc_queue_ctx); 1787ea93d37eSRafal Kozik if (unlikely((rc != 0) || (adapter->num_queues <= 0))) { 1788241da076SRafal Kozik rc = -EFAULT; 1789241da076SRafal Kozik goto err_device_destroy; 1790241da076SRafal Kozik } 17911173fca2SJan Medala 1792ea93d37eSRafal Kozik adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 1793ea93d37eSRafal Kozik adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 17941173fca2SJan Medala 1795ea93d37eSRafal Kozik adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 1796ea93d37eSRafal Kozik adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 17972061fe41SRafal Kozik 17981173fca2SJan Medala /* prepare ring structures */ 17991173fca2SJan Medala ena_init_rings(adapter); 18001173fca2SJan Medala 1801372c1af5SJan Medala ena_config_debug_area(adapter); 1802372c1af5SJan Medala 18031173fca2SJan Medala /* Set max MTU for this device */ 18041173fca2SJan Medala adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 18051173fca2SJan Medala 1806117ba4a6SMichal Krawczyk /* set device support for offloads */ 1807117ba4a6SMichal Krawczyk adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx & 1808117ba4a6SMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0; 1809117ba4a6SMichal Krawczyk adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx & 1810117ba4a6SMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0; 1811117ba4a6SMichal Krawczyk adapter->offloads.rx_csum_supported = 1812117ba4a6SMichal Krawczyk (get_feat_ctx.offload.rx_supported & 1813117ba4a6SMichal Krawczyk ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0; 181483277a7cSJakub Palider 18151173fca2SJan Medala /* Copy MAC address and point DPDK to it */ 18161173fca2SJan Medala eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; 18171173fca2SJan Medala ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, 18181173fca2SJan Medala (struct ether_addr *)adapter->mac_addr); 18191173fca2SJan Medala 182015febafdSThomas Monjalon /* 182115febafdSThomas Monjalon * Pass the information to rte_eth_dev_close() that it should also 182215febafdSThomas Monjalon * release the private port resources. 
182315febafdSThomas Monjalon */ 182415febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 182515febafdSThomas Monjalon 18261173fca2SJan Medala adapter->drv_stats = rte_zmalloc("adapter stats", 18271173fca2SJan Medala sizeof(*adapter->drv_stats), 18281173fca2SJan Medala RTE_CACHE_LINE_SIZE); 18291173fca2SJan Medala if (!adapter->drv_stats) { 18301173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); 1831241da076SRafal Kozik rc = -ENOMEM; 1832241da076SRafal Kozik goto err_delete_debug_area; 18331173fca2SJan Medala } 18341173fca2SJan Medala 1835eb0ef49dSMichal Krawczyk rte_intr_callback_register(intr_handle, 1836eb0ef49dSMichal Krawczyk ena_interrupt_handler_rte, 1837eb0ef49dSMichal Krawczyk adapter); 1838eb0ef49dSMichal Krawczyk rte_intr_enable(intr_handle); 1839eb0ef49dSMichal Krawczyk ena_com_set_admin_polling_mode(ena_dev, false); 1840ca148440SMichal Krawczyk ena_com_admin_aenq_enable(ena_dev); 1841eb0ef49dSMichal Krawczyk 1842d9b8b106SMichal Krawczyk if (adapters_found == 0) 1843d9b8b106SMichal Krawczyk rte_timer_subsystem_init(); 1844d9b8b106SMichal Krawczyk rte_timer_init(&adapter->timer_wd); 1845d9b8b106SMichal Krawczyk 18461173fca2SJan Medala adapters_found++; 18471173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_INIT; 18481173fca2SJan Medala 18491173fca2SJan Medala return 0; 1850241da076SRafal Kozik 1851241da076SRafal Kozik err_delete_debug_area: 1852241da076SRafal Kozik ena_com_delete_debug_area(ena_dev); 1853241da076SRafal Kozik 1854241da076SRafal Kozik err_device_destroy: 1855241da076SRafal Kozik ena_com_delete_host_info(ena_dev); 1856241da076SRafal Kozik ena_com_admin_destroy(ena_dev); 1857241da076SRafal Kozik 1858241da076SRafal Kozik err: 1859241da076SRafal Kozik return rc; 18601173fca2SJan Medala } 18611173fca2SJan Medala 1862e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev) 1863eb0ef49dSMichal Krawczyk { 1864eb0ef49dSMichal Krawczyk struct ena_adapter *adapter = 1865eb0ef49dSMichal Krawczyk (struct ena_adapter *)(eth_dev->data->dev_private); 1866e457bc70SRafal Kozik struct ena_com_dev *ena_dev = &adapter->ena_dev; 1867eb0ef49dSMichal Krawczyk 1868e457bc70SRafal Kozik if (adapter->state == ENA_ADAPTER_STATE_FREE) 1869e457bc70SRafal Kozik return; 1870e457bc70SRafal Kozik 1871e457bc70SRafal Kozik ena_com_set_admin_running_state(ena_dev, false); 1872eb0ef49dSMichal Krawczyk 1873eb0ef49dSMichal Krawczyk if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1874eb0ef49dSMichal Krawczyk ena_close(eth_dev); 1875eb0ef49dSMichal Krawczyk 1876e457bc70SRafal Kozik ena_com_delete_debug_area(ena_dev); 1877e457bc70SRafal Kozik ena_com_delete_host_info(ena_dev); 1878e457bc70SRafal Kozik 1879e457bc70SRafal Kozik ena_com_abort_admin_commands(ena_dev); 1880e457bc70SRafal Kozik ena_com_wait_for_abort_completion(ena_dev); 1881e457bc70SRafal Kozik ena_com_admin_destroy(ena_dev); 1882e457bc70SRafal Kozik ena_com_mmio_reg_read_request_destroy(ena_dev); 1883e457bc70SRafal Kozik 1884e457bc70SRafal Kozik adapter->state = ENA_ADAPTER_STATE_FREE; 1885e457bc70SRafal Kozik } 1886e457bc70SRafal Kozik 1887e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1888e457bc70SRafal Kozik { 1889e457bc70SRafal Kozik if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1890e457bc70SRafal Kozik return 0; 1891e457bc70SRafal Kozik 1892e457bc70SRafal Kozik ena_destroy_device(eth_dev); 1893e457bc70SRafal Kozik 1894eb0ef49dSMichal Krawczyk eth_dev->dev_ops = NULL; 1895eb0ef49dSMichal Krawczyk eth_dev->rx_pkt_burst = 
NULL; 1896eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_burst = NULL; 1897eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_prepare = NULL; 1898eb0ef49dSMichal Krawczyk 1899eb0ef49dSMichal Krawczyk return 0; 1900eb0ef49dSMichal Krawczyk } 1901eb0ef49dSMichal Krawczyk 19021173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev) 19031173fca2SJan Medala { 19041173fca2SJan Medala struct ena_adapter *adapter = 19051173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 19067369f88fSRafal Kozik 19071173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_CONFIG; 19081173fca2SJan Medala 1909a4996bd8SWei Dai adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1910a4996bd8SWei Dai adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 19111173fca2SJan Medala return 0; 19121173fca2SJan Medala } 19131173fca2SJan Medala 19141173fca2SJan Medala static void ena_init_rings(struct ena_adapter *adapter) 19151173fca2SJan Medala { 19161173fca2SJan Medala int i; 19171173fca2SJan Medala 19181173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 19191173fca2SJan Medala struct ena_ring *ring = &adapter->tx_ring[i]; 19201173fca2SJan Medala 19211173fca2SJan Medala ring->configured = 0; 19221173fca2SJan Medala ring->type = ENA_RING_TYPE_TX; 19231173fca2SJan Medala ring->adapter = adapter; 19241173fca2SJan Medala ring->id = i; 19251173fca2SJan Medala ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 19261173fca2SJan Medala ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 19272061fe41SRafal Kozik ring->sgl_size = adapter->max_tx_sgl_size; 19281173fca2SJan Medala } 19291173fca2SJan Medala 19301173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 19311173fca2SJan Medala struct ena_ring *ring = &adapter->rx_ring[i]; 19321173fca2SJan Medala 19331173fca2SJan Medala ring->configured = 0; 19341173fca2SJan Medala ring->type = ENA_RING_TYPE_RX; 19351173fca2SJan Medala ring->adapter = adapter; 19361173fca2SJan Medala ring->id = i; 1937ea93d37eSRafal Kozik ring->sgl_size = adapter->max_rx_sgl_size; 19381173fca2SJan Medala } 19391173fca2SJan Medala } 19401173fca2SJan Medala 19411173fca2SJan Medala static void ena_infos_get(struct rte_eth_dev *dev, 19421173fca2SJan Medala struct rte_eth_dev_info *dev_info) 19431173fca2SJan Medala { 19441173fca2SJan Medala struct ena_adapter *adapter; 19451173fca2SJan Medala struct ena_com_dev *ena_dev; 194656b8b9b7SRafal Kozik uint64_t rx_feat = 0, tx_feat = 0; 19471173fca2SJan Medala 1948498c687aSRafal Kozik ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1949498c687aSRafal Kozik ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 19501173fca2SJan Medala adapter = (struct ena_adapter *)(dev->data->dev_private); 19511173fca2SJan Medala 19521173fca2SJan Medala ena_dev = &adapter->ena_dev; 1953498c687aSRafal Kozik ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 19541173fca2SJan Medala 1955e274f573SMarc Sune dev_info->speed_capa = 1956e274f573SMarc Sune ETH_LINK_SPEED_1G | 1957e274f573SMarc Sune ETH_LINK_SPEED_2_5G | 1958e274f573SMarc Sune ETH_LINK_SPEED_5G | 1959e274f573SMarc Sune ETH_LINK_SPEED_10G | 1960e274f573SMarc Sune ETH_LINK_SPEED_25G | 1961e274f573SMarc Sune ETH_LINK_SPEED_40G | 1962b2feed01SThomas Monjalon ETH_LINK_SPEED_50G | 1963b2feed01SThomas Monjalon ETH_LINK_SPEED_100G; 1964e274f573SMarc Sune 19651173fca2SJan Medala /* Set Tx & Rx features available for device */ 1966117ba4a6SMichal Krawczyk if (adapter->offloads.tso4_supported) 19671173fca2SJan 
Medala tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 19681173fca2SJan Medala 1969117ba4a6SMichal Krawczyk if (adapter->offloads.tx_csum_supported) 19701173fca2SJan Medala tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 19711173fca2SJan Medala DEV_TX_OFFLOAD_UDP_CKSUM | 19721173fca2SJan Medala DEV_TX_OFFLOAD_TCP_CKSUM; 19731173fca2SJan Medala 1974117ba4a6SMichal Krawczyk if (adapter->offloads.rx_csum_supported) 19751173fca2SJan Medala rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 19761173fca2SJan Medala DEV_RX_OFFLOAD_UDP_CKSUM | 19771173fca2SJan Medala DEV_RX_OFFLOAD_TCP_CKSUM; 19781173fca2SJan Medala 1979a0a4ff40SRafal Kozik rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 1980a0a4ff40SRafal Kozik 19811173fca2SJan Medala /* Inform framework about available features */ 19821173fca2SJan Medala dev_info->rx_offload_capa = rx_feat; 19837369f88fSRafal Kozik dev_info->rx_queue_offload_capa = rx_feat; 19841173fca2SJan Medala dev_info->tx_offload_capa = tx_feat; 198556b8b9b7SRafal Kozik dev_info->tx_queue_offload_capa = tx_feat; 19861173fca2SJan Medala 1987b01ead20SRafal Kozik dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP | 1988b01ead20SRafal Kozik ETH_RSS_UDP; 1989b01ead20SRafal Kozik 19901173fca2SJan Medala dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 19911173fca2SJan Medala dev_info->max_rx_pktlen = adapter->max_mtu; 19921173fca2SJan Medala dev_info->max_mac_addrs = 1; 19931173fca2SJan Medala 19941173fca2SJan Medala dev_info->max_rx_queues = adapter->num_queues; 19951173fca2SJan Medala dev_info->max_tx_queues = adapter->num_queues; 19961173fca2SJan Medala dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 199756b8b9b7SRafal Kozik 199856b8b9b7SRafal Kozik adapter->tx_supported_offloads = tx_feat; 19997369f88fSRafal Kozik adapter->rx_supported_offloads = rx_feat; 200092680dc2SRafal Kozik 2001ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size; 200292680dc2SRafal Kozik dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2003ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2004ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 2005ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2006ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 200792680dc2SRafal Kozik 2008ea93d37eSRafal Kozik dev_info->tx_desc_lim.nb_max = adapter->tx_ring_size; 200992680dc2SRafal Kozik dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 201092680dc2SRafal Kozik dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2011ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 201292680dc2SRafal Kozik dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2013ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 20141173fca2SJan Medala } 20151173fca2SJan Medala 20161173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 20171173fca2SJan Medala uint16_t nb_pkts) 20181173fca2SJan Medala { 20191173fca2SJan Medala struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 20201173fca2SJan Medala unsigned int ring_size = rx_ring->ring_size; 20211173fca2SJan Medala unsigned int ring_mask = ring_size - 1; 20221173fca2SJan Medala uint16_t next_to_clean = rx_ring->next_to_clean; 20231daff526SJakub Palider uint16_t desc_in_use = 0; 2024c2034976SMichal Krawczyk uint16_t req_id; 20251173fca2SJan Medala unsigned int recv_idx = 0; 20261173fca2SJan Medala struct rte_mbuf *mbuf = NULL; 20271173fca2SJan Medala struct rte_mbuf *mbuf_head = NULL; 20281173fca2SJan Medala struct rte_mbuf *mbuf_prev = NULL; 20291173fca2SJan Medala struct rte_mbuf 
**rx_buff_info = rx_ring->rx_buffer_info; 20301173fca2SJan Medala unsigned int completed; 20311173fca2SJan Medala 20321173fca2SJan Medala struct ena_com_rx_ctx ena_rx_ctx; 20331173fca2SJan Medala int rc = 0; 20341173fca2SJan Medala 20351173fca2SJan Medala /* Check adapter state */ 20361173fca2SJan Medala if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 20371173fca2SJan Medala RTE_LOG(ALERT, PMD, 20381173fca2SJan Medala "Trying to receive pkts while device is NOT running\n"); 20391173fca2SJan Medala return 0; 20401173fca2SJan Medala } 20411173fca2SJan Medala 20421daff526SJakub Palider desc_in_use = rx_ring->next_to_use - next_to_clean; 20431173fca2SJan Medala if (unlikely(nb_pkts > desc_in_use)) 20441173fca2SJan Medala nb_pkts = desc_in_use; 20451173fca2SJan Medala 20461173fca2SJan Medala for (completed = 0; completed < nb_pkts; completed++) { 20471173fca2SJan Medala int segments = 0; 20481173fca2SJan Medala 2049ea93d37eSRafal Kozik ena_rx_ctx.max_bufs = rx_ring->sgl_size; 20501173fca2SJan Medala ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 20511173fca2SJan Medala ena_rx_ctx.descs = 0; 20521173fca2SJan Medala /* receive packet context */ 20531173fca2SJan Medala rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 20541173fca2SJan Medala rx_ring->ena_com_io_sq, 20551173fca2SJan Medala &ena_rx_ctx); 20561173fca2SJan Medala if (unlikely(rc)) { 20571173fca2SJan Medala RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); 20589b260dbfSRafal Kozik rx_ring->adapter->reset_reason = 20599b260dbfSRafal Kozik ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2060241da076SRafal Kozik rx_ring->adapter->trigger_reset = true; 20617830e905SSolganik Alexander ++rx_ring->rx_stats.bad_desc_num; 20621173fca2SJan Medala return 0; 20631173fca2SJan Medala } 20641173fca2SJan Medala 20651173fca2SJan Medala if (unlikely(ena_rx_ctx.descs == 0)) 20661173fca2SJan Medala break; 20671173fca2SJan Medala 20681173fca2SJan Medala while (segments < ena_rx_ctx.descs) { 2069c2034976SMichal Krawczyk req_id = ena_rx_ctx.ena_bufs[segments].req_id; 2070c2034976SMichal Krawczyk rc = validate_rx_req_id(rx_ring, req_id); 2071709b1dcbSRafal Kozik if (unlikely(rc)) { 2072709b1dcbSRafal Kozik if (segments != 0) 2073709b1dcbSRafal Kozik rte_mbuf_raw_free(mbuf_head); 2074c2034976SMichal Krawczyk break; 2075709b1dcbSRafal Kozik } 2076c2034976SMichal Krawczyk 2077c2034976SMichal Krawczyk mbuf = rx_buff_info[req_id]; 2078709b1dcbSRafal Kozik rx_buff_info[req_id] = NULL; 20791173fca2SJan Medala mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; 20801173fca2SJan Medala mbuf->data_off = RTE_PKTMBUF_HEADROOM; 20811173fca2SJan Medala mbuf->refcnt = 1; 20821173fca2SJan Medala mbuf->next = NULL; 20833d19e1abSRafal Kozik if (unlikely(segments == 0)) { 20841173fca2SJan Medala mbuf->nb_segs = ena_rx_ctx.descs; 20851173fca2SJan Medala mbuf->port = rx_ring->port_id; 20861173fca2SJan Medala mbuf->pkt_len = 0; 20871173fca2SJan Medala mbuf_head = mbuf; 20881173fca2SJan Medala } else { 20891173fca2SJan Medala /* for multi-segment pkts create mbuf chain */ 20901173fca2SJan Medala mbuf_prev->next = mbuf; 20911173fca2SJan Medala } 20921173fca2SJan Medala mbuf_head->pkt_len += mbuf->data_len; 20931173fca2SJan Medala 20941173fca2SJan Medala mbuf_prev = mbuf; 2095c2034976SMichal Krawczyk rx_ring->empty_rx_reqs[next_to_clean & ring_mask] = 2096c2034976SMichal Krawczyk req_id; 20971173fca2SJan Medala segments++; 20981daff526SJakub Palider next_to_clean++; 20991173fca2SJan Medala } 2100f00930d9SRafal Kozik if (unlikely(rc)) 2101f00930d9SRafal Kozik break; 21021173fca2SJan 
Medala 21031173fca2SJan Medala /* fill mbuf attributes if any */ 21041173fca2SJan Medala ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); 21057830e905SSolganik Alexander 21067830e905SSolganik Alexander if (unlikely(mbuf_head->ol_flags & 21077830e905SSolganik Alexander (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) 21087830e905SSolganik Alexander ++rx_ring->rx_stats.bad_csum; 21097830e905SSolganik Alexander 2110e5df9f33SStewart Allen mbuf_head->hash.rss = ena_rx_ctx.hash; 21111173fca2SJan Medala 21121173fca2SJan Medala /* pass to DPDK application head mbuf */ 21131173fca2SJan Medala rx_pkts[recv_idx] = mbuf_head; 21141173fca2SJan Medala recv_idx++; 211545b6d861SMichal Krawczyk rx_ring->rx_stats.bytes += mbuf_head->pkt_len; 21161173fca2SJan Medala } 21171173fca2SJan Medala 211845b6d861SMichal Krawczyk rx_ring->rx_stats.cnt += recv_idx; 2119ec78af6bSMichal Krawczyk rx_ring->next_to_clean = next_to_clean; 2120ec78af6bSMichal Krawczyk 2121ec78af6bSMichal Krawczyk desc_in_use = desc_in_use - completed + 1; 21221173fca2SJan Medala /* Burst refill to save doorbells, memory barriers, const interval */ 2123a45462c5SRafal Kozik if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) { 2124a45462c5SRafal Kozik ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 21251daff526SJakub Palider ena_populate_rx_queue(rx_ring, ring_size - desc_in_use); 2126a45462c5SRafal Kozik } 21271173fca2SJan Medala 21281173fca2SJan Medala return recv_idx; 21291173fca2SJan Medala } 21301173fca2SJan Medala 2131b3fc5a1aSKonstantin Ananyev static uint16_t 213283277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2133b3fc5a1aSKonstantin Ananyev uint16_t nb_pkts) 2134b3fc5a1aSKonstantin Ananyev { 2135b3fc5a1aSKonstantin Ananyev int32_t ret; 2136b3fc5a1aSKonstantin Ananyev uint32_t i; 2137b3fc5a1aSKonstantin Ananyev struct rte_mbuf *m; 213883277a7cSJakub Palider struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 213983277a7cSJakub Palider struct ipv4_hdr *ip_hdr; 2140b3fc5a1aSKonstantin Ananyev uint64_t ol_flags; 214183277a7cSJakub Palider uint16_t frag_field; 214283277a7cSJakub Palider 2143b3fc5a1aSKonstantin Ananyev for (i = 0; i != nb_pkts; i++) { 2144b3fc5a1aSKonstantin Ananyev m = tx_pkts[i]; 2145b3fc5a1aSKonstantin Ananyev ol_flags = m->ol_flags; 2146b3fc5a1aSKonstantin Ananyev 2147bc5ef57dSMichal Krawczyk if (!(ol_flags & PKT_TX_IPV4)) 2148bc5ef57dSMichal Krawczyk continue; 2149bc5ef57dSMichal Krawczyk 2150bc5ef57dSMichal Krawczyk /* If there was no L2 header length specified, assume it is 2151bc5ef57dSMichal Krawczyk * the length of the Ethernet header. 2152bc5ef57dSMichal Krawczyk */ 2153bc5ef57dSMichal Krawczyk if (unlikely(m->l2_len == 0)) 2154bc5ef57dSMichal Krawczyk m->l2_len = sizeof(struct ether_hdr); 2155bc5ef57dSMichal Krawczyk 2156bc5ef57dSMichal Krawczyk ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, 2157bc5ef57dSMichal Krawczyk m->l2_len); 2158bc5ef57dSMichal Krawczyk frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2159bc5ef57dSMichal Krawczyk 2160bc5ef57dSMichal Krawczyk if ((frag_field & IPV4_HDR_DF_FLAG) != 0) { 2161bc5ef57dSMichal Krawczyk m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2162bc5ef57dSMichal Krawczyk 2163bc5ef57dSMichal Krawczyk /* If IPv4 header has DF flag enabled and TSO support is 2164bc5ef57dSMichal Krawczyk * disabled, partial checksum should not be calculated. 
2165bc5ef57dSMichal Krawczyk */ 2166117ba4a6SMichal Krawczyk if (!tx_ring->adapter->offloads.tso4_supported) 2167bc5ef57dSMichal Krawczyk continue; 2168bc5ef57dSMichal Krawczyk } 2169bc5ef57dSMichal Krawczyk 2170b3fc5a1aSKonstantin Ananyev if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 || 2171b3fc5a1aSKonstantin Ananyev (ol_flags & PKT_TX_L4_MASK) == 2172b3fc5a1aSKonstantin Ananyev PKT_TX_SCTP_CKSUM) { 2173baeed5f4SMichal Krawczyk rte_errno = ENOTSUP; 2174b3fc5a1aSKonstantin Ananyev return i; 2175b3fc5a1aSKonstantin Ananyev } 2176b3fc5a1aSKonstantin Ananyev 2177b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2178b3fc5a1aSKonstantin Ananyev ret = rte_validate_tx_offload(m); 2179b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2180baeed5f4SMichal Krawczyk rte_errno = -ret; 2181b3fc5a1aSKonstantin Ananyev return i; 2182b3fc5a1aSKonstantin Ananyev } 2183b3fc5a1aSKonstantin Ananyev #endif 218483277a7cSJakub Palider 218583277a7cSJakub Palider /* In case we are supposed to TSO and have DF not set (DF=0) 218683277a7cSJakub Palider * hardware must be provided with partial checksum, otherwise 218783277a7cSJakub Palider * it will take care of necessary calculations. 218883277a7cSJakub Palider */ 218983277a7cSJakub Palider 2190b3fc5a1aSKonstantin Ananyev ret = rte_net_intel_cksum_flags_prepare(m, 2191b3fc5a1aSKonstantin Ananyev ol_flags & ~PKT_TX_TCP_SEG); 2192b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2193baeed5f4SMichal Krawczyk rte_errno = -ret; 2194b3fc5a1aSKonstantin Ananyev return i; 2195b3fc5a1aSKonstantin Ananyev } 2196b3fc5a1aSKonstantin Ananyev } 2197b3fc5a1aSKonstantin Ananyev 2198b3fc5a1aSKonstantin Ananyev return i; 2199b3fc5a1aSKonstantin Ananyev } 2200b3fc5a1aSKonstantin Ananyev 2201f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter, 2202f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints) 2203f01f060cSRafal Kozik { 2204f01f060cSRafal Kozik if (hints->admin_completion_tx_timeout) 2205f01f060cSRafal Kozik adapter->ena_dev.admin_queue.completion_timeout = 2206f01f060cSRafal Kozik hints->admin_completion_tx_timeout * 1000; 2207f01f060cSRafal Kozik 2208f01f060cSRafal Kozik if (hints->mmio_read_timeout) 2209f01f060cSRafal Kozik /* convert to usec */ 2210f01f060cSRafal Kozik adapter->ena_dev.mmio_read.reg_read_to = 2211f01f060cSRafal Kozik hints->mmio_read_timeout * 1000; 2212d9b8b106SMichal Krawczyk 2213d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout) { 2214d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2215d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2216d9b8b106SMichal Krawczyk else 2217d9b8b106SMichal Krawczyk // Convert msecs to ticks 2218d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = 2219d9b8b106SMichal Krawczyk (hints->driver_watchdog_timeout * 2220d9b8b106SMichal Krawczyk rte_get_timer_hz()) / 1000; 2221d9b8b106SMichal Krawczyk } 2222f01f060cSRafal Kozik } 2223f01f060cSRafal Kozik 22242061fe41SRafal Kozik static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, 22252061fe41SRafal Kozik struct rte_mbuf *mbuf) 22262061fe41SRafal Kozik { 22272fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev; 22282fca2a98SMichal Krawczyk int num_segments, header_len, rc; 22292061fe41SRafal Kozik 22302fca2a98SMichal Krawczyk ena_dev = &tx_ring->adapter->ena_dev; 22312061fe41SRafal Kozik num_segments = mbuf->nb_segs; 22322fca2a98SMichal Krawczyk header_len = mbuf->data_len; 22332061fe41SRafal Kozik 22342061fe41SRafal Kozik if (likely(num_segments < 
tx_ring->sgl_size)) 22352061fe41SRafal Kozik return 0; 22362061fe41SRafal Kozik 22372fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 22382fca2a98SMichal Krawczyk (num_segments == tx_ring->sgl_size) && 22392fca2a98SMichal Krawczyk (header_len < tx_ring->tx_max_header_size)) 22402fca2a98SMichal Krawczyk return 0; 22412fca2a98SMichal Krawczyk 22427830e905SSolganik Alexander ++tx_ring->tx_stats.linearize; 22432061fe41SRafal Kozik rc = rte_pktmbuf_linearize(mbuf); 22447830e905SSolganik Alexander if (unlikely(rc)) { 22452061fe41SRafal Kozik RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n"); 22467830e905SSolganik Alexander rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 22477830e905SSolganik Alexander ++tx_ring->tx_stats.linearize_failed; 22487830e905SSolganik Alexander return rc; 22497830e905SSolganik Alexander } 22502061fe41SRafal Kozik 22512061fe41SRafal Kozik return rc; 22522061fe41SRafal Kozik } 22532061fe41SRafal Kozik 22541173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 22551173fca2SJan Medala uint16_t nb_pkts) 22561173fca2SJan Medala { 22571173fca2SJan Medala struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 22581daff526SJakub Palider uint16_t next_to_use = tx_ring->next_to_use; 22591daff526SJakub Palider uint16_t next_to_clean = tx_ring->next_to_clean; 22601173fca2SJan Medala struct rte_mbuf *mbuf; 22612fca2a98SMichal Krawczyk uint16_t seg_len; 22621173fca2SJan Medala unsigned int ring_size = tx_ring->ring_size; 22631173fca2SJan Medala unsigned int ring_mask = ring_size - 1; 22641173fca2SJan Medala struct ena_com_tx_ctx ena_tx_ctx; 22651173fca2SJan Medala struct ena_tx_buffer *tx_info; 22661173fca2SJan Medala struct ena_com_buf *ebuf; 22671173fca2SJan Medala uint16_t rc, req_id, total_tx_descs = 0; 2268b66b6e72SJakub Palider uint16_t sent_idx = 0, empty_tx_reqs; 22692fca2a98SMichal Krawczyk uint16_t push_len = 0; 22702fca2a98SMichal Krawczyk uint16_t delta = 0; 22711173fca2SJan Medala int nb_hw_desc; 227245b6d861SMichal Krawczyk uint32_t total_length; 22731173fca2SJan Medala 22741173fca2SJan Medala /* Check adapter state */ 22751173fca2SJan Medala if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 22761173fca2SJan Medala RTE_LOG(ALERT, PMD, 22771173fca2SJan Medala "Trying to xmit pkts while device is NOT running\n"); 22781173fca2SJan Medala return 0; 22791173fca2SJan Medala } 22801173fca2SJan Medala 2281b66b6e72SJakub Palider empty_tx_reqs = ring_size - (next_to_use - next_to_clean); 2282b66b6e72SJakub Palider if (nb_pkts > empty_tx_reqs) 2283b66b6e72SJakub Palider nb_pkts = empty_tx_reqs; 2284b66b6e72SJakub Palider 22851173fca2SJan Medala for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 22861173fca2SJan Medala mbuf = tx_pkts[sent_idx]; 228745b6d861SMichal Krawczyk total_length = 0; 22881173fca2SJan Medala 22892061fe41SRafal Kozik rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); 22902061fe41SRafal Kozik if (unlikely(rc)) 22912061fe41SRafal Kozik break; 22922061fe41SRafal Kozik 22931daff526SJakub Palider req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; 22941173fca2SJan Medala tx_info = &tx_ring->tx_buffer_info[req_id]; 22951173fca2SJan Medala tx_info->mbuf = mbuf; 22961173fca2SJan Medala tx_info->num_of_bufs = 0; 22971173fca2SJan Medala ebuf = tx_info->bufs; 22981173fca2SJan Medala 22991173fca2SJan Medala /* Prepare TX context */ 23001173fca2SJan Medala memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 23011173fca2SJan Medala 
memset(&ena_tx_ctx.ena_meta, 0x0,
23021173fca2SJan Medala 		       sizeof(struct ena_com_tx_meta));
23031173fca2SJan Medala 		ena_tx_ctx.ena_bufs = ebuf;
23041173fca2SJan Medala 		ena_tx_ctx.req_id = req_id;
23052fca2a98SMichal Krawczyk 
23062fca2a98SMichal Krawczyk 		delta = 0;
23072fca2a98SMichal Krawczyk 		seg_len = mbuf->data_len;
23082fca2a98SMichal Krawczyk 
23091173fca2SJan Medala 		if (tx_ring->tx_mem_queue_type ==
23101173fca2SJan Medala 				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
23112fca2a98SMichal Krawczyk 			push_len = RTE_MIN(mbuf->pkt_len,
23121173fca2SJan Medala 					   tx_ring->tx_max_header_size);
23132fca2a98SMichal Krawczyk 			ena_tx_ctx.header_len = push_len;
23142fca2a98SMichal Krawczyk 
23152fca2a98SMichal Krawczyk 			if (likely(push_len <= seg_len)) {
23162fca2a98SMichal Krawczyk 				/* If the push header fits within the first
23172fca2a98SMichal Krawczyk 				 * segment, just point it at the first mbuf's data.
23182fca2a98SMichal Krawczyk 				 */
23191173fca2SJan Medala 				ena_tx_ctx.push_header =
23202fca2a98SMichal Krawczyk 					rte_pktmbuf_mtod(mbuf, uint8_t *);
23212fca2a98SMichal Krawczyk 			} else {
23222fca2a98SMichal Krawczyk 				/* If the push header spans several segments,
23232fca2a98SMichal Krawczyk 				 * copy it into the intermediate buffer.
23242fca2a98SMichal Krawczyk 				 */
23252fca2a98SMichal Krawczyk 				rte_pktmbuf_read(mbuf, 0, push_len,
23262fca2a98SMichal Krawczyk 					tx_ring->push_buf_intermediate_buf);
23272fca2a98SMichal Krawczyk 				ena_tx_ctx.push_header =
23282fca2a98SMichal Krawczyk 					tx_ring->push_buf_intermediate_buf;
23292fca2a98SMichal Krawczyk 				delta = push_len - seg_len;
23302fca2a98SMichal Krawczyk 			}
23311173fca2SJan Medala 		} /* there's no else as we take advantage of memset zeroing */
23321173fca2SJan Medala 
23331173fca2SJan Medala 		/* Set TX offloads flags, if applicable */
233456b8b9b7SRafal Kozik 		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
23351173fca2SJan Medala 
23361173fca2SJan Medala 		if (unlikely(mbuf->ol_flags &
23371173fca2SJan Medala 			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
23381173fca2SJan Medala 			rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
23391173fca2SJan Medala 
23401173fca2SJan Medala 		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);
23411173fca2SJan Medala 
23421173fca2SJan Medala 		/* Process the first segment, taking the pushed
23431173fca2SJan Medala 		 * header into consideration
23441173fca2SJan Medala 		 */
23452fca2a98SMichal Krawczyk 		if (seg_len > push_len) {
2346455da545SSantosh Shukla 			ebuf->paddr = mbuf->buf_iova +
23471173fca2SJan Medala 				      mbuf->data_off +
23482fca2a98SMichal Krawczyk 				      push_len;
23492fca2a98SMichal Krawczyk 			ebuf->len = seg_len - push_len;
23501173fca2SJan Medala 			ebuf++;
23511173fca2SJan Medala 			tx_info->num_of_bufs++;
23521173fca2SJan Medala 		}
235345b6d861SMichal Krawczyk 		total_length += mbuf->data_len;
23541173fca2SJan Medala 
23551173fca2SJan Medala 		while ((mbuf = mbuf->next) != NULL) {
23562fca2a98SMichal Krawczyk 			seg_len = mbuf->data_len;
23572fca2a98SMichal Krawczyk 
23582fca2a98SMichal Krawczyk 			/* Skip mbufs whose whole data was pushed as a header */
23592fca2a98SMichal Krawczyk 			if (unlikely(delta > seg_len)) {
23602fca2a98SMichal Krawczyk 				delta -= seg_len;
23612fca2a98SMichal Krawczyk 				continue;
23622fca2a98SMichal Krawczyk 			}
23632fca2a98SMichal Krawczyk 
23642fca2a98SMichal Krawczyk 			ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
23652fca2a98SMichal Krawczyk 			ebuf->len = seg_len - delta;
236645b6d861SMichal Krawczyk 			total_length += ebuf->len;
23671173fca2SJan Medala 			ebuf++;
23681173fca2SJan Medala 			tx_info->num_of_bufs++;
23692fca2a98SMichal Krawczyk 
23702fca2a98SMichal Krawczyk 			delta = 0;
23711173fca2SJan Medala 		}
23721173fca2SJan Medala 
23731173fca2SJan Medala 		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
23741173fca2SJan Medala 
2375c7519ea5SRafal Kozik 		if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
2376c7519ea5SRafal Kozik 					       &ena_tx_ctx)) {
2377c7519ea5SRafal Kozik 			RTE_LOG(DEBUG, PMD, "llq tx max burst size of queue %d"
2378c7519ea5SRafal Kozik 				" achieved, writing doorbell to send burst\n",
2379c7519ea5SRafal Kozik 				tx_ring->id);
2380c7519ea5SRafal Kozik 			rte_wmb();
2381c7519ea5SRafal Kozik 			ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2382c7519ea5SRafal Kozik 		}
2383c7519ea5SRafal Kozik 
2384c7519ea5SRafal Kozik 		/* Prepare the packet's descriptors for the DMA engine */
23851173fca2SJan Medala 		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
23861173fca2SJan Medala 					&ena_tx_ctx, &nb_hw_desc);
23877830e905SSolganik Alexander 		if (unlikely(rc)) {
23887830e905SSolganik Alexander 			++tx_ring->tx_stats.prepare_ctx_err;
23891173fca2SJan Medala 			break;
23907830e905SSolganik Alexander 		}
23911173fca2SJan Medala 		tx_info->tx_descs = nb_hw_desc;
23921173fca2SJan Medala 
23931daff526SJakub Palider 		next_to_use++;
239445b6d861SMichal Krawczyk 		tx_ring->tx_stats.cnt += tx_info->num_of_bufs;
239545b6d861SMichal Krawczyk 		tx_ring->tx_stats.bytes += total_length;
23961173fca2SJan Medala 	}
23977830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
23987830e905SSolganik Alexander 		ena_com_free_desc(tx_ring->ena_com_io_sq);
23991173fca2SJan Medala 
24005e02e19eSJan Medala 	/* If there are ready packets to be xmitted... */
24015e02e19eSJan Medala 	if (sent_idx > 0) {
24025e02e19eSJan Medala 		/* ...let HW do its best :-) */
24031173fca2SJan Medala 		rte_wmb();
24041173fca2SJan Medala 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
240545b6d861SMichal Krawczyk 		tx_ring->tx_stats.doorbells++;
24065e02e19eSJan Medala 		tx_ring->next_to_use = next_to_use;
24075e02e19eSJan Medala 	}
24085e02e19eSJan Medala 
24091173fca2SJan Medala 	/* Clean up completed packets */
24101173fca2SJan Medala 	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
2411f7d82d24SRafal Kozik 		rc = validate_tx_req_id(tx_ring, req_id);
2412f7d82d24SRafal Kozik 		if (rc)
2413f7d82d24SRafal Kozik 			break;
2414f7d82d24SRafal Kozik 
24151173fca2SJan Medala 		/* Get Tx info & store how many descs were processed */
24161173fca2SJan Medala 		tx_info = &tx_ring->tx_buffer_info[req_id];
24171173fca2SJan Medala 		total_tx_descs += tx_info->tx_descs;
24181173fca2SJan Medala 
24191173fca2SJan Medala 		/* Free whole mbuf chain */
24201173fca2SJan Medala 		mbuf = tx_info->mbuf;
24211173fca2SJan Medala 		rte_pktmbuf_free(mbuf);
2422207a514cSMichal Krawczyk 		tx_info->mbuf = NULL;
24231173fca2SJan Medala 
24241173fca2SJan Medala 		/* Put the descriptor back in the ring for reuse */
24251daff526SJakub Palider 		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
24261daff526SJakub Palider 		next_to_clean++;
24271173fca2SJan Medala 
24281173fca2SJan Medala 		/* If too many descs to clean, leave it for another run */
24291173fca2SJan Medala 		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
24301173fca2SJan Medala 			break;
24311173fca2SJan Medala 	}
24327830e905SSolganik Alexander 	tx_ring->tx_stats.available_desc =
24337830e905SSolganik Alexander 		ena_com_free_desc(tx_ring->ena_com_io_sq);
24341173fca2SJan Medala 
24355e02e19eSJan Medala 	if (total_tx_descs > 0) {
24361173fca2SJan Medala 		/* acknowledge completion of sent packets */
24371daff526SJakub Palider 		tx_ring->next_to_clean = next_to_clean;
2438a45462c5SRafal Kozik 		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
2439a45462c5SRafal Kozik 		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
24405e02e19eSJan Medala 	}
24415e02e19eSJan Medala 
24427830e905SSolganik Alexander 	tx_ring->tx_stats.tx_poll++;
24437830e905SSolganik Alexander 
24441173fca2SJan Medala 	return sent_idx;
24451173fca2SJan Medala }
24461173fca2SJan Medala 
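/*
 * Illustrative usage sketch (not part of the driver): eth_ena_xmit_pkts()
 * is installed as the Tx burst callback, so applications reach it through
 * the generic ethdev API. port_id, queue 0 and the burst size below are
 * hypothetical:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx, nb_prep, nb_sent;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_rx);
 *	nb_sent = rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
 *
 * rte_eth_tx_prepare() ends up in eth_ena_prep_pkts() above, which
 * validates the offload flags and computes the partial checksums the
 * device expects before the packets reach eth_ena_xmit_pkts().
 */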
24477830e905SSolganik Alexander /**
24487830e905SSolganik Alexander  * DPDK callback to retrieve names of extended device statistics
24497830e905SSolganik Alexander  *
24507830e905SSolganik Alexander  * @param dev
24517830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
24527830e905SSolganik Alexander  * @param[out] xstats_names
24537830e905SSolganik Alexander  *   Buffer to insert names into.
24547830e905SSolganik Alexander  * @param n
24557830e905SSolganik Alexander  *   Number of entries in the xstats_names array.
24567830e905SSolganik Alexander  *
24577830e905SSolganik Alexander  * @return
24587830e905SSolganik Alexander  *   Number of xstats names.
24597830e905SSolganik Alexander  */
24607830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev,
24617830e905SSolganik Alexander 				struct rte_eth_xstat_name *xstats_names,
24627830e905SSolganik Alexander 				unsigned int n)
24637830e905SSolganik Alexander {
24647830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
24657830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
24667830e905SSolganik Alexander 
24677830e905SSolganik Alexander 	if (n < xstats_count || !xstats_names)
24687830e905SSolganik Alexander 		return xstats_count;
24697830e905SSolganik Alexander 
24707830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
24717830e905SSolganik Alexander 		strlcpy(xstats_names[count].name,
24727830e905SSolganik Alexander 			ena_stats_global_strings[stat].name, sizeof(xstats_names[count].name));
24737830e905SSolganik Alexander 
24747830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
24757830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
24767830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
24777830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
24787830e905SSolganik Alexander 				"rx_q%d_%s", i,
24797830e905SSolganik Alexander 				ena_stats_rx_strings[stat].name);
24807830e905SSolganik Alexander 
24817830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
24827830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
24837830e905SSolganik Alexander 			snprintf(xstats_names[count].name,
24847830e905SSolganik Alexander 				sizeof(xstats_names[count].name),
24857830e905SSolganik Alexander 				"tx_q%d_%s", i,
24867830e905SSolganik Alexander 				ena_stats_tx_strings[stat].name);
24877830e905SSolganik Alexander 
24887830e905SSolganik Alexander 	return xstats_count;
24897830e905SSolganik Alexander }
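/*
 * Illustrative usage sketch (not part of the driver): like every ethdev
 * xstats callback, the function above reports the required array size when
 * the caller's buffer is too small, so a typical application probes first.
 * port_id and the omitted error handling are hypothetical:
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));
 *
 *	if (names != NULL)
 *		rte_eth_xstats_get_names(port_id, names, nb);
 */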
24907830e905SSolganik Alexander 
24917830e905SSolganik Alexander /**
24927830e905SSolganik Alexander  * DPDK callback to get extended device statistics.
24937830e905SSolganik Alexander  *
24947830e905SSolganik Alexander  * @param dev
24957830e905SSolganik Alexander  *   Pointer to Ethernet device structure.
24967830e905SSolganik Alexander  * @param[out] stats
24977830e905SSolganik Alexander  *   Stats table output buffer.
24987830e905SSolganik Alexander  * @param n
24997830e905SSolganik Alexander  *   The size of the stats table.
25007830e905SSolganik Alexander  *
25017830e905SSolganik Alexander  * @return
25027830e905SSolganik Alexander  *   Number of xstats on success, negative on failure.
25037830e905SSolganik Alexander  */
25047830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev,
25057830e905SSolganik Alexander 			  struct rte_eth_xstat *xstats,
25067830e905SSolganik Alexander 			  unsigned int n)
25077830e905SSolganik Alexander {
25087830e905SSolganik Alexander 	struct ena_adapter *adapter =
25097830e905SSolganik Alexander 			(struct ena_adapter *)(dev->data->dev_private);
25107830e905SSolganik Alexander 	unsigned int xstats_count = ena_xstats_calc_num(dev);
25117830e905SSolganik Alexander 	unsigned int stat, i, count = 0;
25127830e905SSolganik Alexander 	int stat_offset;
25137830e905SSolganik Alexander 	void *stats_begin;
25147830e905SSolganik Alexander 
25157830e905SSolganik Alexander 	if (n < xstats_count)
25167830e905SSolganik Alexander 		return xstats_count;
25177830e905SSolganik Alexander 
25187830e905SSolganik Alexander 	if (!xstats)
25197830e905SSolganik Alexander 		return 0;
25207830e905SSolganik Alexander 
25217830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
25227830e905SSolganik Alexander 		stat_offset = ena_stats_global_strings[stat].stat_offset;
25237830e905SSolganik Alexander 		stats_begin = &adapter->dev_stats;
25247830e905SSolganik Alexander 
25257830e905SSolganik Alexander 		xstats[count].id = count;
25267830e905SSolganik Alexander 		xstats[count].value = *((uint64_t *)
25277830e905SSolganik Alexander 			((char *)stats_begin + stat_offset));
25287830e905SSolganik Alexander 	}
25297830e905SSolganik Alexander 
25307830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
25317830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
25327830e905SSolganik Alexander 			stat_offset = ena_stats_rx_strings[stat].stat_offset;
25337830e905SSolganik Alexander 			stats_begin = &adapter->rx_ring[i].rx_stats;
25347830e905SSolganik Alexander 
25357830e905SSolganik Alexander 			xstats[count].id = count;
25367830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
25377830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
25387830e905SSolganik Alexander 		}
25397830e905SSolganik Alexander 	}
25407830e905SSolganik Alexander 
25417830e905SSolganik Alexander 	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
25427830e905SSolganik Alexander 		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
25437830e905SSolganik Alexander 			stat_offset = ena_stats_tx_strings[stat].stat_offset;
25447830e905SSolganik Alexander 			stats_begin = &adapter->tx_ring[i].tx_stats;
25457830e905SSolganik Alexander 
25467830e905SSolganik Alexander 			xstats[count].id = count;
25477830e905SSolganik Alexander 			xstats[count].value = *((uint64_t *)
25487830e905SSolganik Alexander 				((char *)stats_begin + stat_offset));
25497830e905SSolganik Alexander 		}
25507830e905SSolganik Alexander 	}
25517830e905SSolganik Alexander 
25527830e905SSolganik Alexander 	return count;
25537830e905SSolganik Alexander }
25547830e905SSolganik Alexander 
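/*
 * Layout of the flat xstats id space consumed below (derived from the name
 * order produced by ena_xstats_get_names() above; the two-queue example is
 * illustrative only):
 *
 *	[0, ENA_STATS_ARRAY_GLOBAL)                  global device stats
 *	next ENA_STATS_ARRAY_RX * nb_rx_queues ids   per-Rx-queue stats
 *	next ENA_STATS_ARRAY_TX * nb_tx_queues ids   per-Tx-queue stats
 *
 * Within a queue region the ids are stat-major: with 2 Rx queues, the
 * first id maps to (stat 0, q0), the second to (stat 0, q1), the third to
 * (stat 1, q0). That is why the code below computes qid = id % nb_queues
 * and then id /= nb_queues to recover the statistic index.
 */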
25557830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
25567830e905SSolganik Alexander 				const uint64_t *ids,
25577830e905SSolganik Alexander 				uint64_t *values,
25587830e905SSolganik Alexander 				unsigned int n)
25597830e905SSolganik Alexander {
25607830e905SSolganik Alexander 	struct ena_adapter *adapter =
25617830e905SSolganik Alexander 			(struct ena_adapter *)(dev->data->dev_private);
25627830e905SSolganik Alexander 	uint64_t id;
25637830e905SSolganik Alexander 	uint64_t rx_entries, tx_entries;
25647830e905SSolganik Alexander 	unsigned int i;
25657830e905SSolganik Alexander 	int qid;
25667830e905SSolganik Alexander 	int valid = 0;
25677830e905SSolganik Alexander 	for (i = 0; i < n; ++i) {
25687830e905SSolganik Alexander 		id = ids[i];
25697830e905SSolganik Alexander 		/* Check if id belongs to global statistics */
25707830e905SSolganik Alexander 		if (id < ENA_STATS_ARRAY_GLOBAL) {
25717830e905SSolganik Alexander 			values[i] = *((uint64_t *)&adapter->dev_stats + id);
25727830e905SSolganik Alexander 			++valid;
25737830e905SSolganik Alexander 			continue;
25747830e905SSolganik Alexander 		}
25757830e905SSolganik Alexander 
25767830e905SSolganik Alexander 		/* Check if id belongs to rx queue statistics */
25777830e905SSolganik Alexander 		id -= ENA_STATS_ARRAY_GLOBAL;
25787830e905SSolganik Alexander 		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
25797830e905SSolganik Alexander 		if (id < rx_entries) {
25807830e905SSolganik Alexander 			qid = id % dev->data->nb_rx_queues;
25817830e905SSolganik Alexander 			id /= dev->data->nb_rx_queues;
25827830e905SSolganik Alexander 			values[i] = *((uint64_t *)
25837830e905SSolganik Alexander 				&adapter->rx_ring[qid].rx_stats + id);
25847830e905SSolganik Alexander 			++valid;
25857830e905SSolganik Alexander 			continue;
25867830e905SSolganik Alexander 		}
25877830e905SSolganik Alexander 		/* Check if id belongs to tx queue statistics */
25887830e905SSolganik Alexander 		id -= rx_entries;
25897830e905SSolganik Alexander 		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
25907830e905SSolganik Alexander 		if (id < tx_entries) {
25917830e905SSolganik Alexander 			qid = id % dev->data->nb_tx_queues;
25927830e905SSolganik Alexander 			id /= dev->data->nb_tx_queues;
25937830e905SSolganik Alexander 			values[i] = *((uint64_t *)
25947830e905SSolganik Alexander 				&adapter->tx_ring[qid].tx_stats + id);
25957830e905SSolganik Alexander 			++valid;
25967830e905SSolganik Alexander 			continue;
25977830e905SSolganik Alexander 		}
25987830e905SSolganik Alexander 	}
25997830e905SSolganik Alexander 
26007830e905SSolganik Alexander 	return valid;
26017830e905SSolganik Alexander }
26027830e905SSolganik Alexander 
2603ca148440SMichal Krawczyk /*********************************************************************
2604ca148440SMichal Krawczyk  *  PMD configuration
2605ca148440SMichal Krawczyk  *********************************************************************/
2606fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2607fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
2608fdf91e0fSJan Blunck {
2609fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
2610fdf91e0fSJan Blunck 		sizeof(struct ena_adapter), eth_ena_dev_init);
2611fdf91e0fSJan Blunck }
2612fdf91e0fSJan Blunck 
2613fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
2614fdf91e0fSJan Blunck {
2615eb0ef49dSMichal Krawczyk 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
2616fdf91e0fSJan Blunck }
2617fdf91e0fSJan Blunck 
2618fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = {
26191173fca2SJan Medala 	.id_table = pci_id_ena_map,
262005e0eee0SRafal Kozik 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
262105e0eee0SRafal Kozik 		     RTE_PCI_DRV_WC_ACTIVATE,
2622fdf91e0fSJan Blunck 	.probe = eth_ena_pci_probe,
2623fdf91e0fSJan Blunck 	.remove = eth_ena_pci_remove,
26241173fca2SJan Medala };
26251173fca2SJan Medala 
2626fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
262701f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
262806e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
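/*
 * A hedged note on the registrations above: RTE_PMD_REGISTER_KMOD_DEP()
 * only declares which kernel modules may back this PMD; it does not load
 * or bind them. At probe time, rte_eth_dev_pci_generic_probe() allocates
 * the ethdev together with sizeof(struct ena_adapter) bytes of private
 * data and then invokes eth_ena_dev_init() on it, mirroring the generic
 * remove path above.
 */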
uio_pci_generic | vfio-pci"); 26298bc0acaeSStephen Hemminger 2630f8e99896SThomas Monjalon RTE_INIT(ena_init_log) 26318bc0acaeSStephen Hemminger { 26323f111952SHarry van Haaren ena_logtype_init = rte_log_register("pmd.net.ena.init"); 26338bc0acaeSStephen Hemminger if (ena_logtype_init >= 0) 26348bc0acaeSStephen Hemminger rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE); 26353f111952SHarry van Haaren ena_logtype_driver = rte_log_register("pmd.net.ena.driver"); 26368bc0acaeSStephen Hemminger if (ena_logtype_driver >= 0) 26378bc0acaeSStephen Hemminger rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE); 26388bc0acaeSStephen Hemminger } 26393adcba9aSMichal Krawczyk 26403adcba9aSMichal Krawczyk /****************************************************************************** 26413adcba9aSMichal Krawczyk ******************************** AENQ Handlers ******************************* 26423adcba9aSMichal Krawczyk *****************************************************************************/ 2643ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data, 2644ca148440SMichal Krawczyk struct ena_admin_aenq_entry *aenq_e) 2645ca148440SMichal Krawczyk { 2646ca148440SMichal Krawczyk struct rte_eth_dev *eth_dev; 2647ca148440SMichal Krawczyk struct ena_adapter *adapter; 2648ca148440SMichal Krawczyk struct ena_admin_aenq_link_change_desc *aenq_link_desc; 2649ca148440SMichal Krawczyk uint32_t status; 2650ca148440SMichal Krawczyk 2651ca148440SMichal Krawczyk adapter = (struct ena_adapter *)adapter_data; 2652ca148440SMichal Krawczyk aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 2653ca148440SMichal Krawczyk eth_dev = adapter->rte_dev; 2654ca148440SMichal Krawczyk 2655ca148440SMichal Krawczyk status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 2656ca148440SMichal Krawczyk adapter->link_status = status; 2657ca148440SMichal Krawczyk 2658ca148440SMichal Krawczyk ena_link_update(eth_dev, 0); 2659ca148440SMichal Krawczyk _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 2660ca148440SMichal Krawczyk } 2661ca148440SMichal Krawczyk 2662f01f060cSRafal Kozik static void ena_notification(void *data, 2663f01f060cSRafal Kozik struct ena_admin_aenq_entry *aenq_e) 2664f01f060cSRafal Kozik { 2665f01f060cSRafal Kozik struct ena_adapter *adapter = (struct ena_adapter *)data; 2666f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints; 2667f01f060cSRafal Kozik 2668f01f060cSRafal Kozik if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 2669f01f060cSRafal Kozik RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n", 2670f01f060cSRafal Kozik aenq_e->aenq_common_desc.group, 2671f01f060cSRafal Kozik ENA_ADMIN_NOTIFICATION); 2672f01f060cSRafal Kozik 2673f01f060cSRafal Kozik switch (aenq_e->aenq_common_desc.syndrom) { 2674f01f060cSRafal Kozik case ENA_ADMIN_UPDATE_HINTS: 2675f01f060cSRafal Kozik hints = (struct ena_admin_ena_hw_hints *) 2676f01f060cSRafal Kozik (&aenq_e->inline_data_w4); 2677f01f060cSRafal Kozik ena_update_hints(adapter, hints); 2678f01f060cSRafal Kozik break; 2679f01f060cSRafal Kozik default: 2680f01f060cSRafal Kozik RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n", 2681f01f060cSRafal Kozik aenq_e->aenq_common_desc.syndrom); 2682f01f060cSRafal Kozik } 2683f01f060cSRafal Kozik } 2684f01f060cSRafal Kozik 2685d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data, 2686d9b8b106SMichal Krawczyk __rte_unused struct ena_admin_aenq_entry *aenq_e) 2687d9b8b106SMichal Krawczyk { 2688d9b8b106SMichal 
2685d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data,
2686d9b8b106SMichal Krawczyk 			   struct ena_admin_aenq_entry *aenq_e)
2687d9b8b106SMichal Krawczyk {
2688d9b8b106SMichal Krawczyk 	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
268994c3e376SRafal Kozik 	struct ena_admin_aenq_keep_alive_desc *desc;
269094c3e376SRafal Kozik 	uint64_t rx_drops;
2691d9b8b106SMichal Krawczyk 
2692d9b8b106SMichal Krawczyk 	adapter->timestamp_wd = rte_get_timer_cycles();
269394c3e376SRafal Kozik 
269494c3e376SRafal Kozik 	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
269594c3e376SRafal Kozik 	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
269694c3e376SRafal Kozik 	rte_atomic64_set(&adapter->drv_stats->rx_drops, rx_drops);
2697d9b8b106SMichal Krawczyk }
2698d9b8b106SMichal Krawczyk 
26993adcba9aSMichal Krawczyk /**
27003adcba9aSMichal Krawczyk  * This handler is called for an unknown event group or for events with no
27003adcba9aSMichal Krawczyk  * implemented handler
27013adcba9aSMichal Krawczyk  **/
27023adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data,
27033adcba9aSMichal Krawczyk 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
27043adcba9aSMichal Krawczyk {
2705983cce2dSRafal Kozik 	RTE_LOG(ERR, PMD, "Unknown event was received or its handler is "
2706983cce2dSRafal Kozik 		"not implemented\n");
27073adcba9aSMichal Krawczyk }
27083adcba9aSMichal Krawczyk 
2709ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = {
27103adcba9aSMichal Krawczyk 	.handlers = {
2711ca148440SMichal Krawczyk 		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
2712f01f060cSRafal Kozik 		[ENA_ADMIN_NOTIFICATION] = ena_notification,
2713d9b8b106SMichal Krawczyk 		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
27143adcba9aSMichal Krawczyk 	},
27153adcba9aSMichal Krawczyk 	.unimplemented_handler = unimplemented_aenq_handler
27163adcba9aSMichal Krawczyk };
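/*
 * A hedged note on dispatch: this table is handed to the ena_com admin
 * queue during device initialization earlier in this file (not shown in
 * this excerpt). When an AENQ event arrives, ena_com looks up the event
 * group in .handlers and falls back to .unimplemented_handler for groups
 * without an entry, which is why only the three groups above are serviced.
 */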