11173fca2SJan Medala /*- 21173fca2SJan Medala * BSD LICENSE 31173fca2SJan Medala * 41173fca2SJan Medala * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. 51173fca2SJan Medala * All rights reserved. 61173fca2SJan Medala * 71173fca2SJan Medala * Redistribution and use in source and binary forms, with or without 81173fca2SJan Medala * modification, are permitted provided that the following conditions 91173fca2SJan Medala * are met: 101173fca2SJan Medala * 111173fca2SJan Medala * * Redistributions of source code must retain the above copyright 121173fca2SJan Medala * notice, this list of conditions and the following disclaimer. 131173fca2SJan Medala * * Redistributions in binary form must reproduce the above copyright 141173fca2SJan Medala * notice, this list of conditions and the following disclaimer in 151173fca2SJan Medala * the documentation and/or other materials provided with the 161173fca2SJan Medala * distribution. 171173fca2SJan Medala * * Neither the name of copyright holder nor the names of its 181173fca2SJan Medala * contributors may be used to endorse or promote products derived 191173fca2SJan Medala * from this software without specific prior written permission. 201173fca2SJan Medala * 211173fca2SJan Medala * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 221173fca2SJan Medala * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 231173fca2SJan Medala * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 241173fca2SJan Medala * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 251173fca2SJan Medala * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 261173fca2SJan Medala * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 271173fca2SJan Medala * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 281173fca2SJan Medala * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 291173fca2SJan Medala * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 301173fca2SJan Medala * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 311173fca2SJan Medala * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 321173fca2SJan Medala */ 331173fca2SJan Medala 341173fca2SJan Medala #include <rte_ether.h> 35ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 36fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 371173fca2SJan Medala #include <rte_tcp.h> 381173fca2SJan Medala #include <rte_atomic.h> 391173fca2SJan Medala #include <rte_dev.h> 401173fca2SJan Medala #include <rte_errno.h> 41372c1af5SJan Medala #include <rte_version.h> 423d3edc26SJan Medala #include <rte_eal_memconfig.h> 43b3fc5a1aSKonstantin Ananyev #include <rte_net.h> 441173fca2SJan Medala 451173fca2SJan Medala #include "ena_ethdev.h" 461173fca2SJan Medala #include "ena_logs.h" 471173fca2SJan Medala #include "ena_platform.h" 481173fca2SJan Medala #include "ena_com.h" 491173fca2SJan Medala #include "ena_eth_com.h" 501173fca2SJan Medala 511173fca2SJan Medala #include <ena_common_defs.h> 521173fca2SJan Medala #include <ena_regs_defs.h> 531173fca2SJan Medala #include <ena_admin_defs.h> 541173fca2SJan Medala #include <ena_eth_io_defs.h> 551173fca2SJan Medala 56419c3e3eSMichal Krawczyk #define DRV_MODULE_VER_MAJOR 2 57419c3e3eSMichal Krawczyk #define DRV_MODULE_VER_MINOR 0 58419c3e3eSMichal Krawczyk #define DRV_MODULE_VER_SUBMINOR 0 59372c1af5SJan Medala 601173fca2SJan Medala #define ENA_IO_TXQ_IDX(q) (2 * (q)) 611173fca2SJan Medala #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) 621173fca2SJan Medala /*reverse version of ENA_IO_RXQ_IDX*/ 631173fca2SJan Medala #define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2) 641173fca2SJan Medala 651173fca2SJan Medala /* While processing submitted and completed descriptors (rx and tx path 661173fca2SJan Medala * respectively) in a loop it is desired to: 671173fca2SJan Medala * - perform batch submissions while populating sumbissmion queue 681173fca2SJan Medala * - avoid blocking transmission of other packets during cleanup phase 691173fca2SJan Medala * Hence the utilization ratio of 1/8 of a queue size. 701173fca2SJan Medala */ 711173fca2SJan Medala #define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8) 721173fca2SJan Medala 731173fca2SJan Medala #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) 741173fca2SJan Medala #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) 751173fca2SJan Medala 761173fca2SJan Medala #define GET_L4_HDR_LEN(mbuf) \ 771173fca2SJan Medala ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \ 781173fca2SJan Medala mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) 791173fca2SJan Medala 801173fca2SJan Medala #define ENA_RX_RSS_TABLE_LOG_SIZE 7 811173fca2SJan Medala #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) 821173fca2SJan Medala #define ENA_HASH_KEY_SIZE 40 83372c1af5SJan Medala #define ETH_GSTRING_LEN 32 84372c1af5SJan Medala 85372c1af5SJan Medala #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 86372c1af5SJan Medala 8792680dc2SRafal Kozik #define ENA_MIN_RING_DESC 128 8892680dc2SRafal Kozik 89372c1af5SJan Medala enum ethtool_stringset { 90372c1af5SJan Medala ETH_SS_TEST = 0, 91372c1af5SJan Medala ETH_SS_STATS, 92372c1af5SJan Medala }; 93372c1af5SJan Medala 94372c1af5SJan Medala struct ena_stats { 95372c1af5SJan Medala char name[ETH_GSTRING_LEN]; 96372c1af5SJan Medala int stat_offset; 97372c1af5SJan Medala }; 98372c1af5SJan Medala 99372c1af5SJan Medala #define ENA_STAT_ENTRY(stat, stat_type) { \ 100372c1af5SJan Medala .name = #stat, \ 101372c1af5SJan Medala .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ 102372c1af5SJan Medala } 103372c1af5SJan Medala 104372c1af5SJan Medala #define ENA_STAT_RX_ENTRY(stat) \ 105372c1af5SJan Medala ENA_STAT_ENTRY(stat, rx) 106372c1af5SJan Medala 107372c1af5SJan Medala #define ENA_STAT_TX_ENTRY(stat) \ 108372c1af5SJan Medala ENA_STAT_ENTRY(stat, tx) 109372c1af5SJan Medala 110372c1af5SJan Medala #define ENA_STAT_GLOBAL_ENTRY(stat) \ 111372c1af5SJan Medala ENA_STAT_ENTRY(stat, dev) 112372c1af5SJan Medala 11335390750SRafal Kozik #define ENA_MAX_RING_SIZE_RX 8192 1142fca2a98SMichal Krawczyk #define ENA_MAX_RING_SIZE_TX 1024 1152fca2a98SMichal Krawczyk 1163adcba9aSMichal Krawczyk /* 1173adcba9aSMichal Krawczyk * Each rte_memzone should have unique name. 1183adcba9aSMichal Krawczyk * To satisfy it, count number of allocation and add it to name. 1193adcba9aSMichal Krawczyk */ 1203adcba9aSMichal Krawczyk uint32_t ena_alloc_cnt; 1213adcba9aSMichal Krawczyk 122372c1af5SJan Medala static const struct ena_stats ena_stats_global_strings[] = { 123372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(wd_expired), 1247830e905SSolganik Alexander ENA_STAT_GLOBAL_ENTRY(dev_start), 1257830e905SSolganik Alexander ENA_STAT_GLOBAL_ENTRY(dev_stop), 126372c1af5SJan Medala }; 127372c1af5SJan Medala 128372c1af5SJan Medala static const struct ena_stats ena_stats_tx_strings[] = { 129372c1af5SJan Medala ENA_STAT_TX_ENTRY(cnt), 130372c1af5SJan Medala ENA_STAT_TX_ENTRY(bytes), 1317830e905SSolganik Alexander ENA_STAT_TX_ENTRY(prepare_ctx_err), 132372c1af5SJan Medala ENA_STAT_TX_ENTRY(linearize), 133372c1af5SJan Medala ENA_STAT_TX_ENTRY(linearize_failed), 134372c1af5SJan Medala ENA_STAT_TX_ENTRY(tx_poll), 135372c1af5SJan Medala ENA_STAT_TX_ENTRY(doorbells), 136372c1af5SJan Medala ENA_STAT_TX_ENTRY(bad_req_id), 1377830e905SSolganik Alexander ENA_STAT_TX_ENTRY(available_desc), 138372c1af5SJan Medala }; 139372c1af5SJan Medala 140372c1af5SJan Medala static const struct ena_stats ena_stats_rx_strings[] = { 141372c1af5SJan Medala ENA_STAT_RX_ENTRY(cnt), 142372c1af5SJan Medala ENA_STAT_RX_ENTRY(bytes), 1437830e905SSolganik Alexander ENA_STAT_RX_ENTRY(refill_partial), 144372c1af5SJan Medala ENA_STAT_RX_ENTRY(bad_csum), 1457830e905SSolganik Alexander ENA_STAT_RX_ENTRY(mbuf_alloc_fail), 146372c1af5SJan Medala ENA_STAT_RX_ENTRY(bad_desc_num), 1477830e905SSolganik Alexander ENA_STAT_RX_ENTRY(bad_req_id), 148372c1af5SJan Medala }; 149372c1af5SJan Medala 150372c1af5SJan Medala #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 151372c1af5SJan Medala #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 152372c1af5SJan Medala #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 1531173fca2SJan Medala 15456b8b9b7SRafal Kozik #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ 15556b8b9b7SRafal Kozik DEV_TX_OFFLOAD_UDP_CKSUM |\ 15656b8b9b7SRafal Kozik DEV_TX_OFFLOAD_IPV4_CKSUM |\ 15756b8b9b7SRafal Kozik DEV_TX_OFFLOAD_TCP_TSO) 15856b8b9b7SRafal Kozik #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ 15956b8b9b7SRafal Kozik PKT_TX_IP_CKSUM |\ 16056b8b9b7SRafal Kozik PKT_TX_TCP_SEG) 16156b8b9b7SRafal Kozik 1621173fca2SJan Medala /** Vendor ID used by Amazon devices */ 1631173fca2SJan Medala #define PCI_VENDOR_ID_AMAZON 0x1D0F 1641173fca2SJan Medala /** Amazon devices */ 1651173fca2SJan Medala #define PCI_DEVICE_ID_ENA_VF 0xEC20 1661173fca2SJan Medala #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 1671173fca2SJan Medala 168b3fc5a1aSKonstantin Ananyev #define ENA_TX_OFFLOAD_MASK (\ 169b3fc5a1aSKonstantin Ananyev PKT_TX_L4_MASK | \ 170d6db681bSDidier Pallard PKT_TX_IPV6 | \ 171d6db681bSDidier Pallard PKT_TX_IPV4 | \ 172b3fc5a1aSKonstantin Ananyev PKT_TX_IP_CKSUM | \ 173b3fc5a1aSKonstantin Ananyev PKT_TX_TCP_SEG) 174b3fc5a1aSKonstantin Ananyev 175b3fc5a1aSKonstantin Ananyev #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 176b3fc5a1aSKonstantin Ananyev (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 177b3fc5a1aSKonstantin Ananyev 1788bc0acaeSStephen Hemminger int ena_logtype_init; 1798bc0acaeSStephen Hemminger int ena_logtype_driver; 1808bc0acaeSStephen Hemminger 18128a1fd4fSFerruh Yigit static const struct rte_pci_id pci_id_ena_map[] = { 182cb990571SDavid Marchand { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 183cb990571SDavid Marchand { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, 1841173fca2SJan Medala { .device_id = 0 }, 1851173fca2SJan Medala }; 1861173fca2SJan Medala 187ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers; 1883adcba9aSMichal Krawczyk 1891173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev, 190e859d2b8SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx, 191e859d2b8SRafal Kozik bool *wd_state); 1921173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev); 1931173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 1941173fca2SJan Medala uint16_t nb_pkts); 195b3fc5a1aSKonstantin Ananyev static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 196b3fc5a1aSKonstantin Ananyev uint16_t nb_pkts); 1971173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 1981173fca2SJan Medala uint16_t nb_desc, unsigned int socket_id, 1991173fca2SJan Medala const struct rte_eth_txconf *tx_conf); 2001173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 2011173fca2SJan Medala uint16_t nb_desc, unsigned int socket_id, 2021173fca2SJan Medala const struct rte_eth_rxconf *rx_conf, 2031173fca2SJan Medala struct rte_mempool *mp); 2041173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, 2051173fca2SJan Medala struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 2061173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 2071173fca2SJan Medala static void ena_init_rings(struct ena_adapter *adapter); 2081173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 2091173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev); 210eb0ef49dSMichal Krawczyk static void ena_stop(struct rte_eth_dev *dev); 2111173fca2SJan Medala static void ena_close(struct rte_eth_dev *dev); 2122081d5e2SMichal Krawczyk static int ena_dev_reset(struct rte_eth_dev *dev); 213d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 2141173fca2SJan Medala static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 2151173fca2SJan Medala static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 2161173fca2SJan Medala static void ena_rx_queue_release(void *queue); 2171173fca2SJan Medala static void ena_tx_queue_release(void *queue); 2181173fca2SJan Medala static void ena_rx_queue_release_bufs(struct ena_ring *ring); 2191173fca2SJan Medala static void ena_tx_queue_release_bufs(struct ena_ring *ring); 2201173fca2SJan Medala static int ena_link_update(struct rte_eth_dev *dev, 221dd2c630aSFerruh Yigit int wait_to_complete); 222df238f84SMichal Krawczyk static int ena_create_io_queue(struct ena_ring *ring); 22326e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring); 22426e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev, 22526e5543dSRafal Kozik enum ena_ring_type ring_type); 22626e5543dSRafal Kozik static int ena_queue_start(struct ena_ring *ring); 22726e5543dSRafal Kozik static int ena_queue_start_all(struct rte_eth_dev *dev, 2281173fca2SJan Medala enum ena_ring_type ring_type); 2291173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev); 230dd2c630aSFerruh Yigit static void ena_infos_get(struct rte_eth_dev *dev, 2311173fca2SJan Medala struct rte_eth_dev_info *dev_info); 2321173fca2SJan Medala static int ena_rss_reta_update(struct rte_eth_dev *dev, 2331173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 2341173fca2SJan Medala uint16_t reta_size); 2351173fca2SJan Medala static int ena_rss_reta_query(struct rte_eth_dev *dev, 2361173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 2371173fca2SJan Medala uint16_t reta_size); 23815773e06SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg); 239d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 240e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev); 241e457bc70SRafal Kozik static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 2427830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev, 2437830e905SSolganik Alexander struct rte_eth_xstat_name *xstats_names, 2447830e905SSolganik Alexander unsigned int n); 2457830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev, 2467830e905SSolganik Alexander struct rte_eth_xstat *stats, 2477830e905SSolganik Alexander unsigned int n); 2487830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 2497830e905SSolganik Alexander const uint64_t *ids, 2507830e905SSolganik Alexander uint64_t *values, 2517830e905SSolganik Alexander unsigned int n); 2521173fca2SJan Medala 253103ab18cSFerruh Yigit static const struct eth_dev_ops ena_dev_ops = { 2541173fca2SJan Medala .dev_configure = ena_dev_configure, 2551173fca2SJan Medala .dev_infos_get = ena_infos_get, 2561173fca2SJan Medala .rx_queue_setup = ena_rx_queue_setup, 2571173fca2SJan Medala .tx_queue_setup = ena_tx_queue_setup, 2581173fca2SJan Medala .dev_start = ena_start, 259eb0ef49dSMichal Krawczyk .dev_stop = ena_stop, 2601173fca2SJan Medala .link_update = ena_link_update, 2611173fca2SJan Medala .stats_get = ena_stats_get, 2627830e905SSolganik Alexander .xstats_get_names = ena_xstats_get_names, 2637830e905SSolganik Alexander .xstats_get = ena_xstats_get, 2647830e905SSolganik Alexander .xstats_get_by_id = ena_xstats_get_by_id, 2651173fca2SJan Medala .mtu_set = ena_mtu_set, 2661173fca2SJan Medala .rx_queue_release = ena_rx_queue_release, 2671173fca2SJan Medala .tx_queue_release = ena_tx_queue_release, 2681173fca2SJan Medala .dev_close = ena_close, 2692081d5e2SMichal Krawczyk .dev_reset = ena_dev_reset, 2701173fca2SJan Medala .reta_update = ena_rss_reta_update, 2711173fca2SJan Medala .reta_query = ena_rss_reta_query, 2721173fca2SJan Medala }; 2731173fca2SJan Medala 2743d3edc26SJan Medala #define NUMA_NO_NODE SOCKET_ID_ANY 2753d3edc26SJan Medala 2763d3edc26SJan Medala static inline int ena_cpu_to_node(int cpu) 2773d3edc26SJan Medala { 2783d3edc26SJan Medala struct rte_config *config = rte_eal_get_configuration(); 27949df3db8SAnatoly Burakov struct rte_fbarray *arr = &config->mem_config->memzones; 28049df3db8SAnatoly Burakov const struct rte_memzone *mz; 2813d3edc26SJan Medala 28249df3db8SAnatoly Burakov if (unlikely(cpu >= RTE_MAX_MEMZONE)) 2833d3edc26SJan Medala return NUMA_NO_NODE; 28449df3db8SAnatoly Burakov 28549df3db8SAnatoly Burakov mz = rte_fbarray_get(arr, cpu); 28649df3db8SAnatoly Burakov 28749df3db8SAnatoly Burakov return mz->socket_id; 2883d3edc26SJan Medala } 2893d3edc26SJan Medala 2901173fca2SJan Medala static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 2911173fca2SJan Medala struct ena_com_rx_ctx *ena_rx_ctx) 2921173fca2SJan Medala { 2931173fca2SJan Medala uint64_t ol_flags = 0; 294fd617795SRafal Kozik uint32_t packet_type = 0; 2951173fca2SJan Medala 2961173fca2SJan Medala if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 297fd617795SRafal Kozik packet_type |= RTE_PTYPE_L4_TCP; 2981173fca2SJan Medala else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 299fd617795SRafal Kozik packet_type |= RTE_PTYPE_L4_UDP; 3001173fca2SJan Medala 3011173fca2SJan Medala if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) 302fd617795SRafal Kozik packet_type |= RTE_PTYPE_L3_IPV4; 3031173fca2SJan Medala else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) 304fd617795SRafal Kozik packet_type |= RTE_PTYPE_L3_IPV6; 3051173fca2SJan Medala 3061173fca2SJan Medala if (unlikely(ena_rx_ctx->l4_csum_err)) 3071173fca2SJan Medala ol_flags |= PKT_RX_L4_CKSUM_BAD; 3081173fca2SJan Medala if (unlikely(ena_rx_ctx->l3_csum_err)) 3091173fca2SJan Medala ol_flags |= PKT_RX_IP_CKSUM_BAD; 3101173fca2SJan Medala 3111173fca2SJan Medala mbuf->ol_flags = ol_flags; 312fd617795SRafal Kozik mbuf->packet_type = packet_type; 3131173fca2SJan Medala } 3141173fca2SJan Medala 3151173fca2SJan Medala static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 31656b8b9b7SRafal Kozik struct ena_com_tx_ctx *ena_tx_ctx, 31756b8b9b7SRafal Kozik uint64_t queue_offloads) 3181173fca2SJan Medala { 3191173fca2SJan Medala struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 3201173fca2SJan Medala 32156b8b9b7SRafal Kozik if ((mbuf->ol_flags & MBUF_OFFLOADS) && 32256b8b9b7SRafal Kozik (queue_offloads & QUEUE_OFFLOADS)) { 3231173fca2SJan Medala /* check if TSO is required */ 32456b8b9b7SRafal Kozik if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && 32556b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { 3261173fca2SJan Medala ena_tx_ctx->tso_enable = true; 3271173fca2SJan Medala 3281173fca2SJan Medala ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 3291173fca2SJan Medala } 3301173fca2SJan Medala 3311173fca2SJan Medala /* check if L3 checksum is needed */ 33256b8b9b7SRafal Kozik if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && 33356b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) 3341173fca2SJan Medala ena_tx_ctx->l3_csum_enable = true; 3351173fca2SJan Medala 3361173fca2SJan Medala if (mbuf->ol_flags & PKT_TX_IPV6) { 3371173fca2SJan Medala ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 3381173fca2SJan Medala } else { 3391173fca2SJan Medala ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 3401173fca2SJan Medala 3411173fca2SJan Medala /* set don't fragment (DF) flag */ 3421173fca2SJan Medala if (mbuf->packet_type & 3431173fca2SJan Medala (RTE_PTYPE_L4_NONFRAG 3441173fca2SJan Medala | RTE_PTYPE_INNER_L4_NONFRAG)) 3451173fca2SJan Medala ena_tx_ctx->df = true; 3461173fca2SJan Medala } 3471173fca2SJan Medala 3481173fca2SJan Medala /* check if L4 checksum is needed */ 34956b8b9b7SRafal Kozik if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) && 35056b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { 3511173fca2SJan Medala ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 3521173fca2SJan Medala ena_tx_ctx->l4_csum_enable = true; 35356b8b9b7SRafal Kozik } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) && 35456b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { 3551173fca2SJan Medala ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 3561173fca2SJan Medala ena_tx_ctx->l4_csum_enable = true; 35756b8b9b7SRafal Kozik } else { 3581173fca2SJan Medala ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 3591173fca2SJan Medala ena_tx_ctx->l4_csum_enable = false; 3601173fca2SJan Medala } 3611173fca2SJan Medala 3621173fca2SJan Medala ena_meta->mss = mbuf->tso_segsz; 3631173fca2SJan Medala ena_meta->l3_hdr_len = mbuf->l3_len; 3641173fca2SJan Medala ena_meta->l3_hdr_offset = mbuf->l2_len; 3651173fca2SJan Medala 3661173fca2SJan Medala ena_tx_ctx->meta_valid = true; 3671173fca2SJan Medala } else { 3681173fca2SJan Medala ena_tx_ctx->meta_valid = false; 3691173fca2SJan Medala } 3701173fca2SJan Medala } 3711173fca2SJan Medala 372c2034976SMichal Krawczyk static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) 373c2034976SMichal Krawczyk { 374c2034976SMichal Krawczyk if (likely(req_id < rx_ring->ring_size)) 375c2034976SMichal Krawczyk return 0; 376c2034976SMichal Krawczyk 377c2034976SMichal Krawczyk RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id); 378c2034976SMichal Krawczyk 379c2034976SMichal Krawczyk rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 380c2034976SMichal Krawczyk rx_ring->adapter->trigger_reset = true; 38145b6d861SMichal Krawczyk ++rx_ring->rx_stats.bad_req_id; 382c2034976SMichal Krawczyk 383c2034976SMichal Krawczyk return -EFAULT; 384c2034976SMichal Krawczyk } 385c2034976SMichal Krawczyk 386f7d82d24SRafal Kozik static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 387f7d82d24SRafal Kozik { 388f7d82d24SRafal Kozik struct ena_tx_buffer *tx_info = NULL; 389f7d82d24SRafal Kozik 390f7d82d24SRafal Kozik if (likely(req_id < tx_ring->ring_size)) { 391f7d82d24SRafal Kozik tx_info = &tx_ring->tx_buffer_info[req_id]; 392f7d82d24SRafal Kozik if (likely(tx_info->mbuf)) 393f7d82d24SRafal Kozik return 0; 394f7d82d24SRafal Kozik } 395f7d82d24SRafal Kozik 396f7d82d24SRafal Kozik if (tx_info) 397f7d82d24SRafal Kozik RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n"); 398f7d82d24SRafal Kozik else 399f7d82d24SRafal Kozik RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id); 400f7d82d24SRafal Kozik 401f7d82d24SRafal Kozik /* Trigger device reset */ 4027830e905SSolganik Alexander ++tx_ring->tx_stats.bad_req_id; 403f7d82d24SRafal Kozik tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 404f7d82d24SRafal Kozik tx_ring->adapter->trigger_reset = true; 405f7d82d24SRafal Kozik return -EFAULT; 406f7d82d24SRafal Kozik } 407f7d82d24SRafal Kozik 408372c1af5SJan Medala static void ena_config_host_info(struct ena_com_dev *ena_dev) 409372c1af5SJan Medala { 410372c1af5SJan Medala struct ena_admin_host_info *host_info; 411372c1af5SJan Medala int rc; 412372c1af5SJan Medala 413372c1af5SJan Medala /* Allocate only the host info */ 414372c1af5SJan Medala rc = ena_com_allocate_host_info(ena_dev); 415372c1af5SJan Medala if (rc) { 416372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot allocate host info\n"); 417372c1af5SJan Medala return; 418372c1af5SJan Medala } 419372c1af5SJan Medala 420372c1af5SJan Medala host_info = ena_dev->host_attr.host_info; 421372c1af5SJan Medala 422372c1af5SJan Medala host_info->os_type = ENA_ADMIN_OS_DPDK; 423372c1af5SJan Medala host_info->kernel_ver = RTE_VERSION; 424103bb1ccSJohn W. Linville snprintf((char *)host_info->kernel_ver_str, 425103bb1ccSJohn W. Linville sizeof(host_info->kernel_ver_str), 426103bb1ccSJohn W. Linville "%s", rte_version()); 427372c1af5SJan Medala host_info->os_dist = RTE_VERSION; 428103bb1ccSJohn W. Linville snprintf((char *)host_info->os_dist_str, 429103bb1ccSJohn W. Linville sizeof(host_info->os_dist_str), 430103bb1ccSJohn W. Linville "%s", rte_version()); 431372c1af5SJan Medala host_info->driver_version = 432372c1af5SJan Medala (DRV_MODULE_VER_MAJOR) | 433372c1af5SJan Medala (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 434c4144557SJan Medala (DRV_MODULE_VER_SUBMINOR << 435c4144557SJan Medala ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 436b9302eb9SRafal Kozik host_info->num_cpus = rte_lcore_count(); 437372c1af5SJan Medala 438372c1af5SJan Medala rc = ena_com_set_host_attributes(ena_dev); 439372c1af5SJan Medala if (rc) { 440241da076SRafal Kozik if (rc == -ENA_COM_UNSUPPORTED) 441241da076SRafal Kozik RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 442241da076SRafal Kozik else 443372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); 444241da076SRafal Kozik 445372c1af5SJan Medala goto err; 446372c1af5SJan Medala } 447372c1af5SJan Medala 448372c1af5SJan Medala return; 449372c1af5SJan Medala 450372c1af5SJan Medala err: 451372c1af5SJan Medala ena_com_delete_host_info(ena_dev); 452372c1af5SJan Medala } 453372c1af5SJan Medala 4547830e905SSolganik Alexander /* This function calculates the number of xstats based on the current config */ 4557830e905SSolganik Alexander static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev) 456372c1af5SJan Medala { 4577830e905SSolganik Alexander return ENA_STATS_ARRAY_GLOBAL + 4587830e905SSolganik Alexander (dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 4597830e905SSolganik Alexander (dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX); 460372c1af5SJan Medala } 461372c1af5SJan Medala 462372c1af5SJan Medala static void ena_config_debug_area(struct ena_adapter *adapter) 463372c1af5SJan Medala { 464372c1af5SJan Medala u32 debug_area_size; 465372c1af5SJan Medala int rc, ss_count; 466372c1af5SJan Medala 4677830e905SSolganik Alexander ss_count = ena_xstats_calc_num(adapter->rte_dev); 468372c1af5SJan Medala 469372c1af5SJan Medala /* allocate 32 bytes for each string and 64bit for the value */ 470372c1af5SJan Medala debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 471372c1af5SJan Medala 472372c1af5SJan Medala rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 473372c1af5SJan Medala if (rc) { 474372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot allocate debug area\n"); 475372c1af5SJan Medala return; 476372c1af5SJan Medala } 477372c1af5SJan Medala 478372c1af5SJan Medala rc = ena_com_set_host_attributes(&adapter->ena_dev); 479372c1af5SJan Medala if (rc) { 480241da076SRafal Kozik if (rc == -ENA_COM_UNSUPPORTED) 481372c1af5SJan Medala RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 482241da076SRafal Kozik else 483241da076SRafal Kozik RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); 484241da076SRafal Kozik 485372c1af5SJan Medala goto err; 486372c1af5SJan Medala } 487372c1af5SJan Medala 488372c1af5SJan Medala return; 489372c1af5SJan Medala err: 490372c1af5SJan Medala ena_com_delete_debug_area(&adapter->ena_dev); 491372c1af5SJan Medala } 492372c1af5SJan Medala 4931173fca2SJan Medala static void ena_close(struct rte_eth_dev *dev) 4941173fca2SJan Medala { 4954d7877fdSMichal Krawczyk struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4964d7877fdSMichal Krawczyk struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4971173fca2SJan Medala struct ena_adapter *adapter = 4981173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 4991173fca2SJan Medala 500df238f84SMichal Krawczyk if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 501eb0ef49dSMichal Krawczyk ena_stop(dev); 502eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_CLOSED; 50315773e06SMichal Krawczyk 5041173fca2SJan Medala ena_rx_queue_release_all(dev); 5051173fca2SJan Medala ena_tx_queue_release_all(dev); 5064d7877fdSMichal Krawczyk 5074d7877fdSMichal Krawczyk rte_free(adapter->drv_stats); 5084d7877fdSMichal Krawczyk adapter->drv_stats = NULL; 5094d7877fdSMichal Krawczyk 5104d7877fdSMichal Krawczyk rte_intr_disable(intr_handle); 5114d7877fdSMichal Krawczyk rte_intr_callback_unregister(intr_handle, 5124d7877fdSMichal Krawczyk ena_interrupt_handler_rte, 5134d7877fdSMichal Krawczyk adapter); 5144d7877fdSMichal Krawczyk 5154d7877fdSMichal Krawczyk /* 5164d7877fdSMichal Krawczyk * MAC is not allocated dynamically. Setting NULL should prevent from 5174d7877fdSMichal Krawczyk * release of the resource in the rte_eth_dev_release_port(). 5184d7877fdSMichal Krawczyk */ 5194d7877fdSMichal Krawczyk dev->data->mac_addrs = NULL; 5201173fca2SJan Medala } 5211173fca2SJan Medala 5222081d5e2SMichal Krawczyk static int 5232081d5e2SMichal Krawczyk ena_dev_reset(struct rte_eth_dev *dev) 5242081d5e2SMichal Krawczyk { 525e457bc70SRafal Kozik int rc = 0; 5262081d5e2SMichal Krawczyk 527e457bc70SRafal Kozik ena_destroy_device(dev); 528e457bc70SRafal Kozik rc = eth_ena_dev_init(dev); 529241da076SRafal Kozik if (rc) 530498c687aSRafal Kozik PMD_INIT_LOG(CRIT, "Cannot initialize device"); 531e457bc70SRafal Kozik 5322081d5e2SMichal Krawczyk return rc; 5332081d5e2SMichal Krawczyk } 5342081d5e2SMichal Krawczyk 5351173fca2SJan Medala static int ena_rss_reta_update(struct rte_eth_dev *dev, 5361173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 5371173fca2SJan Medala uint16_t reta_size) 5381173fca2SJan Medala { 5391173fca2SJan Medala struct ena_adapter *adapter = 5401173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 5411173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 542241da076SRafal Kozik int rc, i; 5431173fca2SJan Medala u16 entry_value; 5441173fca2SJan Medala int conf_idx; 5451173fca2SJan Medala int idx; 5461173fca2SJan Medala 5471173fca2SJan Medala if ((reta_size == 0) || (reta_conf == NULL)) 5481173fca2SJan Medala return -EINVAL; 5491173fca2SJan Medala 5501173fca2SJan Medala if (reta_size > ENA_RX_RSS_TABLE_SIZE) { 5511173fca2SJan Medala RTE_LOG(WARNING, PMD, 5521173fca2SJan Medala "indirection table %d is bigger than supported (%d)\n", 5531173fca2SJan Medala reta_size, ENA_RX_RSS_TABLE_SIZE); 554241da076SRafal Kozik return -EINVAL; 5551173fca2SJan Medala } 5561173fca2SJan Medala 5571173fca2SJan Medala for (i = 0 ; i < reta_size ; i++) { 5581173fca2SJan Medala /* each reta_conf is for 64 entries. 5591173fca2SJan Medala * to support 128 we use 2 conf of 64 5601173fca2SJan Medala */ 5611173fca2SJan Medala conf_idx = i / RTE_RETA_GROUP_SIZE; 5621173fca2SJan Medala idx = i % RTE_RETA_GROUP_SIZE; 5631173fca2SJan Medala if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { 5641173fca2SJan Medala entry_value = 5651173fca2SJan Medala ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); 566241da076SRafal Kozik 567241da076SRafal Kozik rc = ena_com_indirect_table_fill_entry(ena_dev, 5681173fca2SJan Medala i, 5691173fca2SJan Medala entry_value); 570241da076SRafal Kozik if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 5711173fca2SJan Medala RTE_LOG(ERR, PMD, 5721173fca2SJan Medala "Cannot fill indirect table\n"); 573241da076SRafal Kozik return rc; 5741173fca2SJan Medala } 5751173fca2SJan Medala } 5761173fca2SJan Medala } 5771173fca2SJan Medala 578241da076SRafal Kozik rc = ena_com_indirect_table_set(ena_dev); 579241da076SRafal Kozik if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 5801173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 581241da076SRafal Kozik return rc; 5821173fca2SJan Medala } 5831173fca2SJan Medala 5841173fca2SJan Medala RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n", 5851173fca2SJan Medala __func__, reta_size, adapter->rte_dev->data->port_id); 586241da076SRafal Kozik 587241da076SRafal Kozik return 0; 5881173fca2SJan Medala } 5891173fca2SJan Medala 5901173fca2SJan Medala /* Query redirection table. */ 5911173fca2SJan Medala static int ena_rss_reta_query(struct rte_eth_dev *dev, 5921173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 5931173fca2SJan Medala uint16_t reta_size) 5941173fca2SJan Medala { 5951173fca2SJan Medala struct ena_adapter *adapter = 5961173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 5971173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 598241da076SRafal Kozik int rc; 5991173fca2SJan Medala int i; 6001173fca2SJan Medala u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; 6011173fca2SJan Medala int reta_conf_idx; 6021173fca2SJan Medala int reta_idx; 6031173fca2SJan Medala 6041173fca2SJan Medala if (reta_size == 0 || reta_conf == NULL || 6051173fca2SJan Medala (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) 6061173fca2SJan Medala return -EINVAL; 6071173fca2SJan Medala 608241da076SRafal Kozik rc = ena_com_indirect_table_get(ena_dev, indirect_table); 609241da076SRafal Kozik if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 6101173fca2SJan Medala RTE_LOG(ERR, PMD, "cannot get indirect table\n"); 611241da076SRafal Kozik return -ENOTSUP; 6121173fca2SJan Medala } 6131173fca2SJan Medala 6141173fca2SJan Medala for (i = 0 ; i < reta_size ; i++) { 6151173fca2SJan Medala reta_conf_idx = i / RTE_RETA_GROUP_SIZE; 6161173fca2SJan Medala reta_idx = i % RTE_RETA_GROUP_SIZE; 6171173fca2SJan Medala if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) 6181173fca2SJan Medala reta_conf[reta_conf_idx].reta[reta_idx] = 6191173fca2SJan Medala ENA_IO_RXQ_IDX_REV(indirect_table[i]); 6201173fca2SJan Medala } 621241da076SRafal Kozik 622241da076SRafal Kozik return 0; 6231173fca2SJan Medala } 6241173fca2SJan Medala 6251173fca2SJan Medala static int ena_rss_init_default(struct ena_adapter *adapter) 6261173fca2SJan Medala { 6271173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 6281173fca2SJan Medala uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; 6291173fca2SJan Medala int rc, i; 6301173fca2SJan Medala u32 val; 6311173fca2SJan Medala 6321173fca2SJan Medala rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 6331173fca2SJan Medala if (unlikely(rc)) { 6341173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot init indirect table\n"); 6351173fca2SJan Medala goto err_rss_init; 6361173fca2SJan Medala } 6371173fca2SJan Medala 6381173fca2SJan Medala for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 6391173fca2SJan Medala val = i % nb_rx_queues; 6401173fca2SJan Medala rc = ena_com_indirect_table_fill_entry(ena_dev, i, 6411173fca2SJan Medala ENA_IO_RXQ_IDX(val)); 6423adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 6431173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); 6441173fca2SJan Medala goto err_fill_indir; 6451173fca2SJan Medala } 6461173fca2SJan Medala } 6471173fca2SJan Medala 6481173fca2SJan Medala rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 6491173fca2SJan Medala ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 6503adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 6511173fca2SJan Medala RTE_LOG(INFO, PMD, "Cannot fill hash function\n"); 6521173fca2SJan Medala goto err_fill_indir; 6531173fca2SJan Medala } 6541173fca2SJan Medala 6551173fca2SJan Medala rc = ena_com_set_default_hash_ctrl(ena_dev); 6563adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 6571173fca2SJan Medala RTE_LOG(INFO, PMD, "Cannot fill hash control\n"); 6581173fca2SJan Medala goto err_fill_indir; 6591173fca2SJan Medala } 6601173fca2SJan Medala 6611173fca2SJan Medala rc = ena_com_indirect_table_set(ena_dev); 6623adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 6631173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 6641173fca2SJan Medala goto err_fill_indir; 6651173fca2SJan Medala } 6661173fca2SJan Medala RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n", 6671173fca2SJan Medala adapter->rte_dev->data->port_id); 6681173fca2SJan Medala 6691173fca2SJan Medala return 0; 6701173fca2SJan Medala 6711173fca2SJan Medala err_fill_indir: 6721173fca2SJan Medala ena_com_rss_destroy(ena_dev); 6731173fca2SJan Medala err_rss_init: 6741173fca2SJan Medala 6751173fca2SJan Medala return rc; 6761173fca2SJan Medala } 6771173fca2SJan Medala 6781173fca2SJan Medala static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 6791173fca2SJan Medala { 6801173fca2SJan Medala struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; 6811173fca2SJan Medala int nb_queues = dev->data->nb_rx_queues; 6821173fca2SJan Medala int i; 6831173fca2SJan Medala 6841173fca2SJan Medala for (i = 0; i < nb_queues; i++) 6851173fca2SJan Medala ena_rx_queue_release(queues[i]); 6861173fca2SJan Medala } 6871173fca2SJan Medala 6881173fca2SJan Medala static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 6891173fca2SJan Medala { 6901173fca2SJan Medala struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; 6911173fca2SJan Medala int nb_queues = dev->data->nb_tx_queues; 6921173fca2SJan Medala int i; 6931173fca2SJan Medala 6941173fca2SJan Medala for (i = 0; i < nb_queues; i++) 6951173fca2SJan Medala ena_tx_queue_release(queues[i]); 6961173fca2SJan Medala } 6971173fca2SJan Medala 6981173fca2SJan Medala static void ena_rx_queue_release(void *queue) 6991173fca2SJan Medala { 7001173fca2SJan Medala struct ena_ring *ring = (struct ena_ring *)queue; 7011173fca2SJan Medala 7021173fca2SJan Medala /* Free ring resources */ 7031173fca2SJan Medala if (ring->rx_buffer_info) 7041173fca2SJan Medala rte_free(ring->rx_buffer_info); 7051173fca2SJan Medala ring->rx_buffer_info = NULL; 7061173fca2SJan Medala 70779405ee1SRafal Kozik if (ring->rx_refill_buffer) 70879405ee1SRafal Kozik rte_free(ring->rx_refill_buffer); 70979405ee1SRafal Kozik ring->rx_refill_buffer = NULL; 71079405ee1SRafal Kozik 711c2034976SMichal Krawczyk if (ring->empty_rx_reqs) 712c2034976SMichal Krawczyk rte_free(ring->empty_rx_reqs); 713c2034976SMichal Krawczyk ring->empty_rx_reqs = NULL; 714c2034976SMichal Krawczyk 7151173fca2SJan Medala ring->configured = 0; 7161173fca2SJan Medala 7171173fca2SJan Medala RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", 7181173fca2SJan Medala ring->port_id, ring->id); 7191173fca2SJan Medala } 7201173fca2SJan Medala 7211173fca2SJan Medala static void ena_tx_queue_release(void *queue) 7221173fca2SJan Medala { 7231173fca2SJan Medala struct ena_ring *ring = (struct ena_ring *)queue; 7241173fca2SJan Medala 7251173fca2SJan Medala /* Free ring resources */ 7262fca2a98SMichal Krawczyk if (ring->push_buf_intermediate_buf) 7272fca2a98SMichal Krawczyk rte_free(ring->push_buf_intermediate_buf); 7282fca2a98SMichal Krawczyk 7291173fca2SJan Medala if (ring->tx_buffer_info) 7301173fca2SJan Medala rte_free(ring->tx_buffer_info); 7311173fca2SJan Medala 7321173fca2SJan Medala if (ring->empty_tx_reqs) 7331173fca2SJan Medala rte_free(ring->empty_tx_reqs); 7341173fca2SJan Medala 7351173fca2SJan Medala ring->empty_tx_reqs = NULL; 7361173fca2SJan Medala ring->tx_buffer_info = NULL; 7372fca2a98SMichal Krawczyk ring->push_buf_intermediate_buf = NULL; 7381173fca2SJan Medala 7391173fca2SJan Medala ring->configured = 0; 7401173fca2SJan Medala 7411173fca2SJan Medala RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n", 7421173fca2SJan Medala ring->port_id, ring->id); 7431173fca2SJan Medala } 7441173fca2SJan Medala 7451173fca2SJan Medala static void ena_rx_queue_release_bufs(struct ena_ring *ring) 7461173fca2SJan Medala { 747709b1dcbSRafal Kozik unsigned int i; 7481173fca2SJan Medala 749709b1dcbSRafal Kozik for (i = 0; i < ring->ring_size; ++i) 750709b1dcbSRafal Kozik if (ring->rx_buffer_info[i]) { 751709b1dcbSRafal Kozik rte_mbuf_raw_free(ring->rx_buffer_info[i]); 752709b1dcbSRafal Kozik ring->rx_buffer_info[i] = NULL; 7531173fca2SJan Medala } 7541173fca2SJan Medala } 7551173fca2SJan Medala 7561173fca2SJan Medala static void ena_tx_queue_release_bufs(struct ena_ring *ring) 7571173fca2SJan Medala { 758207a514cSMichal Krawczyk unsigned int i; 7591173fca2SJan Medala 760207a514cSMichal Krawczyk for (i = 0; i < ring->ring_size; ++i) { 761207a514cSMichal Krawczyk struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 7621173fca2SJan Medala 7631173fca2SJan Medala if (tx_buf->mbuf) 7641173fca2SJan Medala rte_pktmbuf_free(tx_buf->mbuf); 7651173fca2SJan Medala } 7661173fca2SJan Medala } 7671173fca2SJan Medala 7681173fca2SJan Medala static int ena_link_update(struct rte_eth_dev *dev, 7691173fca2SJan Medala __rte_unused int wait_to_complete) 7701173fca2SJan Medala { 7711173fca2SJan Medala struct rte_eth_link *link = &dev->data->dev_link; 772ca148440SMichal Krawczyk struct ena_adapter *adapter; 7731173fca2SJan Medala 774ca148440SMichal Krawczyk adapter = (struct ena_adapter *)(dev->data->dev_private); 775ca148440SMichal Krawczyk 776ca148440SMichal Krawczyk link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 77741e59028SRafal Kozik link->link_speed = ETH_SPEED_NUM_NONE; 7781173fca2SJan Medala link->link_duplex = ETH_LINK_FULL_DUPLEX; 7791173fca2SJan Medala 7801173fca2SJan Medala return 0; 7811173fca2SJan Medala } 7821173fca2SJan Medala 78326e5543dSRafal Kozik static int ena_queue_start_all(struct rte_eth_dev *dev, 7841173fca2SJan Medala enum ena_ring_type ring_type) 7851173fca2SJan Medala { 7861173fca2SJan Medala struct ena_adapter *adapter = 7871173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 7881173fca2SJan Medala struct ena_ring *queues = NULL; 78953b61841SMichal Krawczyk int nb_queues; 7901173fca2SJan Medala int i = 0; 7911173fca2SJan Medala int rc = 0; 7921173fca2SJan Medala 79353b61841SMichal Krawczyk if (ring_type == ENA_RING_TYPE_RX) { 79453b61841SMichal Krawczyk queues = adapter->rx_ring; 79553b61841SMichal Krawczyk nb_queues = dev->data->nb_rx_queues; 79653b61841SMichal Krawczyk } else { 79753b61841SMichal Krawczyk queues = adapter->tx_ring; 79853b61841SMichal Krawczyk nb_queues = dev->data->nb_tx_queues; 79953b61841SMichal Krawczyk } 80053b61841SMichal Krawczyk for (i = 0; i < nb_queues; i++) { 8011173fca2SJan Medala if (queues[i].configured) { 8021173fca2SJan Medala if (ring_type == ENA_RING_TYPE_RX) { 8031173fca2SJan Medala ena_assert_msg( 8041173fca2SJan Medala dev->data->rx_queues[i] == &queues[i], 8051173fca2SJan Medala "Inconsistent state of rx queues\n"); 8061173fca2SJan Medala } else { 8071173fca2SJan Medala ena_assert_msg( 8081173fca2SJan Medala dev->data->tx_queues[i] == &queues[i], 8091173fca2SJan Medala "Inconsistent state of tx queues\n"); 8101173fca2SJan Medala } 8111173fca2SJan Medala 81226e5543dSRafal Kozik rc = ena_queue_start(&queues[i]); 8131173fca2SJan Medala 8141173fca2SJan Medala if (rc) { 8151173fca2SJan Medala PMD_INIT_LOG(ERR, 81626e5543dSRafal Kozik "failed to start queue %d type(%d)", 8171173fca2SJan Medala i, ring_type); 81826e5543dSRafal Kozik goto err; 8191173fca2SJan Medala } 8201173fca2SJan Medala } 8211173fca2SJan Medala } 8221173fca2SJan Medala 8231173fca2SJan Medala return 0; 82426e5543dSRafal Kozik 82526e5543dSRafal Kozik err: 82626e5543dSRafal Kozik while (i--) 82726e5543dSRafal Kozik if (queues[i].configured) 82826e5543dSRafal Kozik ena_queue_stop(&queues[i]); 82926e5543dSRafal Kozik 83026e5543dSRafal Kozik return rc; 8311173fca2SJan Medala } 8321173fca2SJan Medala 8331173fca2SJan Medala static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) 8341173fca2SJan Medala { 8351173fca2SJan Medala uint32_t max_frame_len = adapter->max_mtu; 8361173fca2SJan Medala 8377369f88fSRafal Kozik if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads & 8387369f88fSRafal Kozik DEV_RX_OFFLOAD_JUMBO_FRAME) 8391173fca2SJan Medala max_frame_len = 8401173fca2SJan Medala adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; 8411173fca2SJan Medala 8421173fca2SJan Medala return max_frame_len; 8431173fca2SJan Medala } 8441173fca2SJan Medala 8451173fca2SJan Medala static int ena_check_valid_conf(struct ena_adapter *adapter) 8461173fca2SJan Medala { 8471173fca2SJan Medala uint32_t max_frame_len = ena_get_mtu_conf(adapter); 8481173fca2SJan Medala 849241da076SRafal Kozik if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) { 850241da076SRafal Kozik PMD_INIT_LOG(ERR, "Unsupported MTU of %d. " 851498c687aSRafal Kozik "max mtu: %d, min mtu: %d", 852241da076SRafal Kozik max_frame_len, adapter->max_mtu, ENA_MIN_MTU); 853241da076SRafal Kozik return ENA_COM_UNSUPPORTED; 8541173fca2SJan Medala } 8551173fca2SJan Medala 8561173fca2SJan Medala return 0; 8571173fca2SJan Medala } 8581173fca2SJan Medala 8591173fca2SJan Medala static int 860ea93d37eSRafal Kozik ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx) 8611173fca2SJan Medala { 8622fca2a98SMichal Krawczyk struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 8632fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev = ctx->ena_dev; 8642fca2a98SMichal Krawczyk uint32_t tx_queue_size = ENA_MAX_RING_SIZE_TX; 8652fca2a98SMichal Krawczyk uint32_t rx_queue_size = ENA_MAX_RING_SIZE_RX; 8661173fca2SJan Medala 8672fca2a98SMichal Krawczyk if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 868ea93d37eSRafal Kozik struct ena_admin_queue_ext_feature_fields *max_queue_ext = 869ea93d37eSRafal Kozik &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 8702fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 8712fca2a98SMichal Krawczyk max_queue_ext->max_rx_cq_depth); 8722fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 873ea93d37eSRafal Kozik max_queue_ext->max_rx_sq_depth); 8742fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 8752fca2a98SMichal Krawczyk max_queue_ext->max_tx_cq_depth); 8762fca2a98SMichal Krawczyk 8772fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == 8782fca2a98SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV) { 8792fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 8802fca2a98SMichal Krawczyk llq->max_llq_depth); 8812fca2a98SMichal Krawczyk } else { 8822fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 883ea93d37eSRafal Kozik max_queue_ext->max_tx_sq_depth); 8842fca2a98SMichal Krawczyk } 8852fca2a98SMichal Krawczyk 886ea93d37eSRafal Kozik ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 887ea93d37eSRafal Kozik max_queue_ext->max_per_packet_rx_descs); 888ea93d37eSRafal Kozik ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 889ea93d37eSRafal Kozik max_queue_ext->max_per_packet_tx_descs); 890ea93d37eSRafal Kozik } else { 891ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 892ea93d37eSRafal Kozik &ctx->get_feat_ctx->max_queues; 8932fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 8942fca2a98SMichal Krawczyk max_queues->max_cq_depth); 8952fca2a98SMichal Krawczyk rx_queue_size = RTE_MIN(rx_queue_size, 896ea93d37eSRafal Kozik max_queues->max_sq_depth); 8972fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 8982fca2a98SMichal Krawczyk max_queues->max_cq_depth); 8992fca2a98SMichal Krawczyk 9002fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == 9012fca2a98SMichal Krawczyk ENA_ADMIN_PLACEMENT_POLICY_DEV) { 9022fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 9032fca2a98SMichal Krawczyk llq->max_llq_depth); 9042fca2a98SMichal Krawczyk } else { 9052fca2a98SMichal Krawczyk tx_queue_size = RTE_MIN(tx_queue_size, 9062fca2a98SMichal Krawczyk max_queues->max_sq_depth); 9072fca2a98SMichal Krawczyk } 9082fca2a98SMichal Krawczyk 909ea93d37eSRafal Kozik ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 910ea93d37eSRafal Kozik max_queues->max_packet_tx_descs); 911ea93d37eSRafal Kozik ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 912ea93d37eSRafal Kozik max_queues->max_packet_rx_descs); 913ea93d37eSRafal Kozik } 9141173fca2SJan Medala 915ea93d37eSRafal Kozik /* Round down to the nearest power of 2 */ 916ea93d37eSRafal Kozik rx_queue_size = rte_align32prevpow2(rx_queue_size); 917ea93d37eSRafal Kozik tx_queue_size = rte_align32prevpow2(tx_queue_size); 9181173fca2SJan Medala 919ea93d37eSRafal Kozik if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) { 920f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "Invalid queue size"); 9211173fca2SJan Medala return -EFAULT; 9221173fca2SJan Medala } 9231173fca2SJan Medala 924ea93d37eSRafal Kozik ctx->rx_queue_size = rx_queue_size; 925ea93d37eSRafal Kozik ctx->tx_queue_size = tx_queue_size; 9262061fe41SRafal Kozik 927ea93d37eSRafal Kozik return 0; 9281173fca2SJan Medala } 9291173fca2SJan Medala 9301173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev) 9311173fca2SJan Medala { 9321173fca2SJan Medala struct ena_adapter *adapter = 9331173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 9341173fca2SJan Medala 9351173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->ierrors); 9361173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->oerrors); 9371173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 93894c3e376SRafal Kozik rte_atomic64_init(&adapter->drv_stats->rx_drops); 9391173fca2SJan Medala } 9401173fca2SJan Medala 941d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev, 9421173fca2SJan Medala struct rte_eth_stats *stats) 9431173fca2SJan Medala { 9441173fca2SJan Medala struct ena_admin_basic_stats ena_stats; 9451173fca2SJan Medala struct ena_adapter *adapter = 9461173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 9471173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 9481173fca2SJan Medala int rc; 94945b6d861SMichal Krawczyk int i; 95045b6d861SMichal Krawczyk int max_rings_stats; 9511173fca2SJan Medala 9521173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 953d5b0924bSMatan Azrad return -ENOTSUP; 9541173fca2SJan Medala 9551173fca2SJan Medala memset(&ena_stats, 0, sizeof(ena_stats)); 9561173fca2SJan Medala rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 9571173fca2SJan Medala if (unlikely(rc)) { 958498c687aSRafal Kozik RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA\n"); 959d5b0924bSMatan Azrad return rc; 9601173fca2SJan Medala } 9611173fca2SJan Medala 9621173fca2SJan Medala /* Set of basic statistics from ENA */ 9631173fca2SJan Medala stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 9641173fca2SJan Medala ena_stats.rx_pkts_low); 9651173fca2SJan Medala stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 9661173fca2SJan Medala ena_stats.tx_pkts_low); 9671173fca2SJan Medala stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 9681173fca2SJan Medala ena_stats.rx_bytes_low); 9691173fca2SJan Medala stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 9701173fca2SJan Medala ena_stats.tx_bytes_low); 9711173fca2SJan Medala 9721173fca2SJan Medala /* Driver related stats */ 97394c3e376SRafal Kozik stats->imissed = rte_atomic64_read(&adapter->drv_stats->rx_drops); 9741173fca2SJan Medala stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 9751173fca2SJan Medala stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 9761173fca2SJan Medala stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 97745b6d861SMichal Krawczyk 97845b6d861SMichal Krawczyk max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 97945b6d861SMichal Krawczyk RTE_ETHDEV_QUEUE_STAT_CNTRS); 98045b6d861SMichal Krawczyk for (i = 0; i < max_rings_stats; ++i) { 98145b6d861SMichal Krawczyk struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 98245b6d861SMichal Krawczyk 98345b6d861SMichal Krawczyk stats->q_ibytes[i] = rx_stats->bytes; 98445b6d861SMichal Krawczyk stats->q_ipackets[i] = rx_stats->cnt; 98545b6d861SMichal Krawczyk stats->q_errors[i] = rx_stats->bad_desc_num + 98645b6d861SMichal Krawczyk rx_stats->bad_req_id; 98745b6d861SMichal Krawczyk } 98845b6d861SMichal Krawczyk 98945b6d861SMichal Krawczyk max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 99045b6d861SMichal Krawczyk RTE_ETHDEV_QUEUE_STAT_CNTRS); 99145b6d861SMichal Krawczyk for (i = 0; i < max_rings_stats; ++i) { 99245b6d861SMichal Krawczyk struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 99345b6d861SMichal Krawczyk 99445b6d861SMichal Krawczyk stats->q_obytes[i] = tx_stats->bytes; 99545b6d861SMichal Krawczyk stats->q_opackets[i] = tx_stats->cnt; 99645b6d861SMichal Krawczyk } 99745b6d861SMichal Krawczyk 998d5b0924bSMatan Azrad return 0; 9991173fca2SJan Medala } 10001173fca2SJan Medala 10011173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 10021173fca2SJan Medala { 10031173fca2SJan Medala struct ena_adapter *adapter; 10041173fca2SJan Medala struct ena_com_dev *ena_dev; 10051173fca2SJan Medala int rc = 0; 10061173fca2SJan Medala 1007498c687aSRafal Kozik ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1008498c687aSRafal Kozik ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 10091173fca2SJan Medala adapter = (struct ena_adapter *)(dev->data->dev_private); 10101173fca2SJan Medala 10111173fca2SJan Medala ena_dev = &adapter->ena_dev; 1012498c687aSRafal Kozik ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 10131173fca2SJan Medala 1014241da076SRafal Kozik if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { 10151173fca2SJan Medala RTE_LOG(ERR, PMD, 1016241da076SRafal Kozik "Invalid MTU setting. new_mtu: %d " 1017241da076SRafal Kozik "max mtu: %d min mtu: %d\n", 1018241da076SRafal Kozik mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); 1019241da076SRafal Kozik return -EINVAL; 10201173fca2SJan Medala } 10211173fca2SJan Medala 10221173fca2SJan Medala rc = ena_com_set_dev_mtu(ena_dev, mtu); 10231173fca2SJan Medala if (rc) 10241173fca2SJan Medala RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu); 10251173fca2SJan Medala else 10261173fca2SJan Medala RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); 10271173fca2SJan Medala 10281173fca2SJan Medala return rc; 10291173fca2SJan Medala } 10301173fca2SJan Medala 10311173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev) 10321173fca2SJan Medala { 10331173fca2SJan Medala struct ena_adapter *adapter = 10341173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 1035d9b8b106SMichal Krawczyk uint64_t ticks; 10361173fca2SJan Medala int rc = 0; 10371173fca2SJan Medala 10381173fca2SJan Medala rc = ena_check_valid_conf(adapter); 10391173fca2SJan Medala if (rc) 10401173fca2SJan Medala return rc; 10411173fca2SJan Medala 104226e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 10431173fca2SJan Medala if (rc) 10441173fca2SJan Medala return rc; 10451173fca2SJan Medala 104626e5543dSRafal Kozik rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 10471173fca2SJan Medala if (rc) 104826e5543dSRafal Kozik goto err_start_tx; 10491173fca2SJan Medala 10501173fca2SJan Medala if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 1051361913adSDaria Kolistratova ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { 10521173fca2SJan Medala rc = ena_rss_init_default(adapter); 10531173fca2SJan Medala if (rc) 105426e5543dSRafal Kozik goto err_rss_init; 10551173fca2SJan Medala } 10561173fca2SJan Medala 10571173fca2SJan Medala ena_stats_restart(dev); 10581173fca2SJan Medala 1059d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles(); 1060d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1061d9b8b106SMichal Krawczyk 1062d9b8b106SMichal Krawczyk ticks = rte_get_timer_hz(); 1063d9b8b106SMichal Krawczyk rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1064d9b8b106SMichal Krawczyk ena_timer_wd_callback, adapter); 1065d9b8b106SMichal Krawczyk 10667830e905SSolganik Alexander ++adapter->dev_stats.dev_start; 10671173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_RUNNING; 10681173fca2SJan Medala 10691173fca2SJan Medala return 0; 107026e5543dSRafal Kozik 107126e5543dSRafal Kozik err_rss_init: 107226e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 107326e5543dSRafal Kozik err_start_tx: 107426e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 107526e5543dSRafal Kozik return rc; 10761173fca2SJan Medala } 10771173fca2SJan Medala 1078eb0ef49dSMichal Krawczyk static void ena_stop(struct rte_eth_dev *dev) 1079eb0ef49dSMichal Krawczyk { 1080eb0ef49dSMichal Krawczyk struct ena_adapter *adapter = 1081eb0ef49dSMichal Krawczyk (struct ena_adapter *)(dev->data->dev_private); 1082e457bc70SRafal Kozik struct ena_com_dev *ena_dev = &adapter->ena_dev; 1083e457bc70SRafal Kozik int rc; 1084eb0ef49dSMichal Krawczyk 1085d9b8b106SMichal Krawczyk rte_timer_stop_sync(&adapter->timer_wd); 108626e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 108726e5543dSRafal Kozik ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1088d9b8b106SMichal Krawczyk 1089e457bc70SRafal Kozik if (adapter->trigger_reset) { 1090e457bc70SRafal Kozik rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1091e457bc70SRafal Kozik if (rc) 1092e457bc70SRafal Kozik RTE_LOG(ERR, PMD, "Device reset failed rc=%d\n", rc); 1093e457bc70SRafal Kozik } 1094e457bc70SRafal Kozik 10957830e905SSolganik Alexander ++adapter->dev_stats.dev_stop; 1096eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_STOPPED; 1097eb0ef49dSMichal Krawczyk } 1098eb0ef49dSMichal Krawczyk 1099df238f84SMichal Krawczyk static int ena_create_io_queue(struct ena_ring *ring) 1100df238f84SMichal Krawczyk { 1101df238f84SMichal Krawczyk struct ena_adapter *adapter; 1102df238f84SMichal Krawczyk struct ena_com_dev *ena_dev; 1103df238f84SMichal Krawczyk struct ena_com_create_io_ctx ctx = 1104df238f84SMichal Krawczyk /* policy set to _HOST just to satisfy icc compiler */ 1105df238f84SMichal Krawczyk { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1106df238f84SMichal Krawczyk 0, 0, 0, 0, 0 }; 1107df238f84SMichal Krawczyk uint16_t ena_qid; 1108778677dcSRafal Kozik unsigned int i; 1109df238f84SMichal Krawczyk int rc; 1110df238f84SMichal Krawczyk 1111df238f84SMichal Krawczyk adapter = ring->adapter; 1112df238f84SMichal Krawczyk ena_dev = &adapter->ena_dev; 1113df238f84SMichal Krawczyk 1114df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) { 1115df238f84SMichal Krawczyk ena_qid = ENA_IO_TXQ_IDX(ring->id); 1116df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1117df238f84SMichal Krawczyk ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1118df238f84SMichal Krawczyk ctx.queue_size = adapter->tx_ring_size; 1119778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1120778677dcSRafal Kozik ring->empty_tx_reqs[i] = i; 1121df238f84SMichal Krawczyk } else { 1122df238f84SMichal Krawczyk ena_qid = ENA_IO_RXQ_IDX(ring->id); 1123df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1124df238f84SMichal Krawczyk ctx.queue_size = adapter->rx_ring_size; 1125778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1126778677dcSRafal Kozik ring->empty_rx_reqs[i] = i; 1127df238f84SMichal Krawczyk } 1128df238f84SMichal Krawczyk ctx.qid = ena_qid; 1129df238f84SMichal Krawczyk ctx.msix_vector = -1; /* interrupts not used */ 1130df238f84SMichal Krawczyk ctx.numa_node = ena_cpu_to_node(ring->id); 1131df238f84SMichal Krawczyk 1132df238f84SMichal Krawczyk rc = ena_com_create_io_queue(ena_dev, &ctx); 1133df238f84SMichal Krawczyk if (rc) { 1134df238f84SMichal Krawczyk RTE_LOG(ERR, PMD, 1135df238f84SMichal Krawczyk "failed to create io queue #%d (qid:%d) rc: %d\n", 1136df238f84SMichal Krawczyk ring->id, ena_qid, rc); 1137df238f84SMichal Krawczyk return rc; 1138df238f84SMichal Krawczyk } 1139df238f84SMichal Krawczyk 1140df238f84SMichal Krawczyk rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1141df238f84SMichal Krawczyk &ring->ena_com_io_sq, 1142df238f84SMichal Krawczyk &ring->ena_com_io_cq); 1143df238f84SMichal Krawczyk if (rc) { 1144df238f84SMichal Krawczyk RTE_LOG(ERR, PMD, 1145df238f84SMichal Krawczyk "Failed to get io queue handlers. queue num %d rc: %d\n", 1146df238f84SMichal Krawczyk ring->id, rc); 1147df238f84SMichal Krawczyk ena_com_destroy_io_queue(ena_dev, ena_qid); 1148df238f84SMichal Krawczyk return rc; 1149df238f84SMichal Krawczyk } 1150df238f84SMichal Krawczyk 1151df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) 1152df238f84SMichal Krawczyk ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1153df238f84SMichal Krawczyk 1154df238f84SMichal Krawczyk return 0; 1155df238f84SMichal Krawczyk } 1156df238f84SMichal Krawczyk 115726e5543dSRafal Kozik static void ena_queue_stop(struct ena_ring *ring) 1158df238f84SMichal Krawczyk { 115926e5543dSRafal Kozik struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1160df238f84SMichal Krawczyk 116126e5543dSRafal Kozik if (ring->type == ENA_RING_TYPE_RX) { 116226e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 116326e5543dSRafal Kozik ena_rx_queue_release_bufs(ring); 116426e5543dSRafal Kozik } else { 116526e5543dSRafal Kozik ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 116626e5543dSRafal Kozik ena_tx_queue_release_bufs(ring); 1167df238f84SMichal Krawczyk } 1168df238f84SMichal Krawczyk } 1169df238f84SMichal Krawczyk 117026e5543dSRafal Kozik static void ena_queue_stop_all(struct rte_eth_dev *dev, 117126e5543dSRafal Kozik enum ena_ring_type ring_type) 117226e5543dSRafal Kozik { 117326e5543dSRafal Kozik struct ena_adapter *adapter = 117426e5543dSRafal Kozik (struct ena_adapter *)(dev->data->dev_private); 117526e5543dSRafal Kozik struct ena_ring *queues = NULL; 117626e5543dSRafal Kozik uint16_t nb_queues, i; 117726e5543dSRafal Kozik 117826e5543dSRafal Kozik if (ring_type == ENA_RING_TYPE_RX) { 117926e5543dSRafal Kozik queues = adapter->rx_ring; 118026e5543dSRafal Kozik nb_queues = dev->data->nb_rx_queues; 118126e5543dSRafal Kozik } else { 118226e5543dSRafal Kozik queues = adapter->tx_ring; 118326e5543dSRafal Kozik nb_queues = dev->data->nb_tx_queues; 118426e5543dSRafal Kozik } 118526e5543dSRafal Kozik 118626e5543dSRafal Kozik for (i = 0; i < nb_queues; ++i) 118726e5543dSRafal Kozik if (queues[i].configured) 118826e5543dSRafal Kozik ena_queue_stop(&queues[i]); 118926e5543dSRafal Kozik } 119026e5543dSRafal Kozik 119126e5543dSRafal Kozik static int ena_queue_start(struct ena_ring *ring) 11921173fca2SJan Medala { 1193a467e8f3SMichal Krawczyk int rc, bufs_num; 11941173fca2SJan Medala 11951173fca2SJan Medala ena_assert_msg(ring->configured == 1, 119626e5543dSRafal Kozik "Trying to start unconfigured queue\n"); 11971173fca2SJan Medala 1198df238f84SMichal Krawczyk rc = ena_create_io_queue(ring); 1199df238f84SMichal Krawczyk if (rc) { 1200498c687aSRafal Kozik PMD_INIT_LOG(ERR, "Failed to create IO queue!"); 1201df238f84SMichal Krawczyk return rc; 1202df238f84SMichal Krawczyk } 1203df238f84SMichal Krawczyk 12041173fca2SJan Medala ring->next_to_clean = 0; 12051173fca2SJan Medala ring->next_to_use = 0; 12061173fca2SJan Medala 12077830e905SSolganik Alexander if (ring->type == ENA_RING_TYPE_TX) { 12087830e905SSolganik Alexander ring->tx_stats.available_desc = 12097830e905SSolganik Alexander ena_com_free_desc(ring->ena_com_io_sq); 12101173fca2SJan Medala return 0; 12117830e905SSolganik Alexander } 12121173fca2SJan Medala 1213a467e8f3SMichal Krawczyk bufs_num = ring->ring_size - 1; 1214a467e8f3SMichal Krawczyk rc = ena_populate_rx_queue(ring, bufs_num); 1215a467e8f3SMichal Krawczyk if (rc != bufs_num) { 121626e5543dSRafal Kozik ena_com_destroy_io_queue(&ring->adapter->ena_dev, 121726e5543dSRafal Kozik ENA_IO_RXQ_IDX(ring->id)); 1218f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 1219241da076SRafal Kozik return ENA_COM_FAULT; 12201173fca2SJan Medala } 12211173fca2SJan Medala 12221173fca2SJan Medala return 0; 12231173fca2SJan Medala } 12241173fca2SJan Medala 12251173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, 12261173fca2SJan Medala uint16_t queue_idx, 12271173fca2SJan Medala uint16_t nb_desc, 12281173fca2SJan Medala __rte_unused unsigned int socket_id, 122956b8b9b7SRafal Kozik const struct rte_eth_txconf *tx_conf) 12301173fca2SJan Medala { 12311173fca2SJan Medala struct ena_ring *txq = NULL; 12321173fca2SJan Medala struct ena_adapter *adapter = 12331173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 12341173fca2SJan Medala unsigned int i; 12351173fca2SJan Medala 12361173fca2SJan Medala txq = &adapter->tx_ring[queue_idx]; 12371173fca2SJan Medala 12381173fca2SJan Medala if (txq->configured) { 12391173fca2SJan Medala RTE_LOG(CRIT, PMD, 12401173fca2SJan Medala "API violation. Queue %d is already configured\n", 12411173fca2SJan Medala queue_idx); 1242241da076SRafal Kozik return ENA_COM_FAULT; 12431173fca2SJan Medala } 12441173fca2SJan Medala 12451daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 12461daff526SJakub Palider RTE_LOG(ERR, PMD, 1247498c687aSRafal Kozik "Unsupported size of TX queue: %d is not a power of 2.\n", 12481daff526SJakub Palider nb_desc); 12491daff526SJakub Palider return -EINVAL; 12501daff526SJakub Palider } 12511daff526SJakub Palider 12521173fca2SJan Medala if (nb_desc > adapter->tx_ring_size) { 12531173fca2SJan Medala RTE_LOG(ERR, PMD, 12541173fca2SJan Medala "Unsupported size of TX queue (max size: %d)\n", 12551173fca2SJan Medala adapter->tx_ring_size); 12561173fca2SJan Medala return -EINVAL; 12571173fca2SJan Medala } 12581173fca2SJan Medala 1259ea93d37eSRafal Kozik if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE) 1260ea93d37eSRafal Kozik nb_desc = adapter->tx_ring_size; 1261ea93d37eSRafal Kozik 12621173fca2SJan Medala txq->port_id = dev->data->port_id; 12631173fca2SJan Medala txq->next_to_clean = 0; 12641173fca2SJan Medala txq->next_to_use = 0; 12651173fca2SJan Medala txq->ring_size = nb_desc; 12661173fca2SJan Medala 12671173fca2SJan Medala txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 12681173fca2SJan Medala sizeof(struct ena_tx_buffer) * 12691173fca2SJan Medala txq->ring_size, 12701173fca2SJan Medala RTE_CACHE_LINE_SIZE); 12711173fca2SJan Medala if (!txq->tx_buffer_info) { 12721173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); 1273df238f84SMichal Krawczyk return -ENOMEM; 12741173fca2SJan Medala } 12751173fca2SJan Medala 12761173fca2SJan Medala txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 12771173fca2SJan Medala sizeof(u16) * txq->ring_size, 12781173fca2SJan Medala RTE_CACHE_LINE_SIZE); 12791173fca2SJan Medala if (!txq->empty_tx_reqs) { 12801173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); 1281df238f84SMichal Krawczyk rte_free(txq->tx_buffer_info); 1282df238f84SMichal Krawczyk return -ENOMEM; 12831173fca2SJan Medala } 1284241da076SRafal Kozik 12852fca2a98SMichal Krawczyk txq->push_buf_intermediate_buf = 12862fca2a98SMichal Krawczyk rte_zmalloc("txq->push_buf_intermediate_buf", 12872fca2a98SMichal Krawczyk txq->tx_max_header_size, 12882fca2a98SMichal Krawczyk RTE_CACHE_LINE_SIZE); 12892fca2a98SMichal Krawczyk if (!txq->push_buf_intermediate_buf) { 12902fca2a98SMichal Krawczyk RTE_LOG(ERR, PMD, "failed to alloc push buff for LLQ\n"); 12912fca2a98SMichal Krawczyk rte_free(txq->tx_buffer_info); 12922fca2a98SMichal Krawczyk rte_free(txq->empty_tx_reqs); 12932fca2a98SMichal Krawczyk return -ENOMEM; 12942fca2a98SMichal Krawczyk } 12952fca2a98SMichal Krawczyk 12961173fca2SJan Medala for (i = 0; i < txq->ring_size; i++) 12971173fca2SJan Medala txq->empty_tx_reqs[i] = i; 12981173fca2SJan Medala 12992081d5e2SMichal Krawczyk if (tx_conf != NULL) { 13002081d5e2SMichal Krawczyk txq->offloads = 13012081d5e2SMichal Krawczyk tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 13022081d5e2SMichal Krawczyk } 13031173fca2SJan Medala /* Store pointer to this queue in upper layer */ 13041173fca2SJan Medala txq->configured = 1; 13051173fca2SJan Medala dev->data->tx_queues[queue_idx] = txq; 1306241da076SRafal Kozik 1307241da076SRafal Kozik return 0; 13081173fca2SJan Medala } 13091173fca2SJan Medala 13101173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, 13111173fca2SJan Medala uint16_t queue_idx, 13121173fca2SJan Medala uint16_t nb_desc, 13131173fca2SJan Medala __rte_unused unsigned int socket_id, 1314a4996bd8SWei Dai __rte_unused const struct rte_eth_rxconf *rx_conf, 13151173fca2SJan Medala struct rte_mempool *mp) 13161173fca2SJan Medala { 13171173fca2SJan Medala struct ena_adapter *adapter = 13181173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 13191173fca2SJan Medala struct ena_ring *rxq = NULL; 1320df238f84SMichal Krawczyk int i; 13211173fca2SJan Medala 13221173fca2SJan Medala rxq = &adapter->rx_ring[queue_idx]; 13231173fca2SJan Medala if (rxq->configured) { 13241173fca2SJan Medala RTE_LOG(CRIT, PMD, 13251173fca2SJan Medala "API violation. Queue %d is already configured\n", 13261173fca2SJan Medala queue_idx); 1327241da076SRafal Kozik return ENA_COM_FAULT; 13281173fca2SJan Medala } 13291173fca2SJan Medala 1330ea93d37eSRafal Kozik if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE) 1331ea93d37eSRafal Kozik nb_desc = adapter->rx_ring_size; 1332ea93d37eSRafal Kozik 13331daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 13341daff526SJakub Palider RTE_LOG(ERR, PMD, 1335498c687aSRafal Kozik "Unsupported size of RX queue: %d is not a power of 2.\n", 13361daff526SJakub Palider nb_desc); 13371daff526SJakub Palider return -EINVAL; 13381daff526SJakub Palider } 13391daff526SJakub Palider 13401173fca2SJan Medala if (nb_desc > adapter->rx_ring_size) { 13411173fca2SJan Medala RTE_LOG(ERR, PMD, 13421173fca2SJan Medala "Unsupported size of RX queue (max size: %d)\n", 13431173fca2SJan Medala adapter->rx_ring_size); 13441173fca2SJan Medala return -EINVAL; 13451173fca2SJan Medala } 13461173fca2SJan Medala 13471173fca2SJan Medala rxq->port_id = dev->data->port_id; 13481173fca2SJan Medala rxq->next_to_clean = 0; 13491173fca2SJan Medala rxq->next_to_use = 0; 13501173fca2SJan Medala rxq->ring_size = nb_desc; 13511173fca2SJan Medala rxq->mb_pool = mp; 13521173fca2SJan Medala 13531173fca2SJan Medala rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", 13541173fca2SJan Medala sizeof(struct rte_mbuf *) * nb_desc, 13551173fca2SJan Medala RTE_CACHE_LINE_SIZE); 13561173fca2SJan Medala if (!rxq->rx_buffer_info) { 13571173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); 13581173fca2SJan Medala return -ENOMEM; 13591173fca2SJan Medala } 13601173fca2SJan Medala 136179405ee1SRafal Kozik rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer", 136279405ee1SRafal Kozik sizeof(struct rte_mbuf *) * nb_desc, 136379405ee1SRafal Kozik RTE_CACHE_LINE_SIZE); 136479405ee1SRafal Kozik 136579405ee1SRafal Kozik if (!rxq->rx_refill_buffer) { 136679405ee1SRafal Kozik RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n"); 136779405ee1SRafal Kozik rte_free(rxq->rx_buffer_info); 136879405ee1SRafal Kozik rxq->rx_buffer_info = NULL; 136979405ee1SRafal Kozik return -ENOMEM; 137079405ee1SRafal Kozik } 137179405ee1SRafal Kozik 1372c2034976SMichal Krawczyk rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", 1373c2034976SMichal Krawczyk sizeof(uint16_t) * nb_desc, 1374c2034976SMichal Krawczyk RTE_CACHE_LINE_SIZE); 1375c2034976SMichal Krawczyk if (!rxq->empty_rx_reqs) { 1376c2034976SMichal Krawczyk RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n"); 1377c2034976SMichal Krawczyk rte_free(rxq->rx_buffer_info); 1378c2034976SMichal Krawczyk rxq->rx_buffer_info = NULL; 137979405ee1SRafal Kozik rte_free(rxq->rx_refill_buffer); 138079405ee1SRafal Kozik rxq->rx_refill_buffer = NULL; 1381c2034976SMichal Krawczyk return -ENOMEM; 1382c2034976SMichal Krawczyk } 1383c2034976SMichal Krawczyk 1384c2034976SMichal Krawczyk for (i = 0; i < nb_desc; i++) 1385eccbe2ffSRafal Kozik rxq->empty_rx_reqs[i] = i; 1386c2034976SMichal Krawczyk 13871173fca2SJan Medala /* Store pointer to this queue in upper layer */ 13881173fca2SJan Medala rxq->configured = 1; 13891173fca2SJan Medala dev->data->rx_queues[queue_idx] = rxq; 13901173fca2SJan Medala 1391df238f84SMichal Krawczyk return 0; 13921173fca2SJan Medala } 13931173fca2SJan Medala 13941173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 13951173fca2SJan Medala { 13961173fca2SJan Medala unsigned int i; 13971173fca2SJan Medala int rc; 13981daff526SJakub Palider uint16_t ring_size = rxq->ring_size; 13991daff526SJakub Palider uint16_t ring_mask = ring_size - 1; 14001daff526SJakub Palider uint16_t next_to_use = rxq->next_to_use; 1401c2034976SMichal Krawczyk uint16_t in_use, req_id; 140279405ee1SRafal Kozik struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 14031173fca2SJan Medala 14041173fca2SJan Medala if (unlikely(!count)) 14051173fca2SJan Medala return 0; 14061173fca2SJan Medala 14071daff526SJakub Palider in_use = rxq->next_to_use - rxq->next_to_clean; 1408498c687aSRafal Kozik ena_assert_msg(((in_use + count) < ring_size), "bad ring state\n"); 14091173fca2SJan Medala 14101173fca2SJan Medala /* get resources for incoming packets */ 141179405ee1SRafal Kozik rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count); 14121173fca2SJan Medala if (unlikely(rc < 0)) { 14131173fca2SJan Medala rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 14147830e905SSolganik Alexander ++rxq->rx_stats.mbuf_alloc_fail; 14151173fca2SJan Medala PMD_RX_LOG(DEBUG, "there are no enough free buffers"); 14161173fca2SJan Medala return 0; 14171173fca2SJan Medala } 14181173fca2SJan Medala 14191173fca2SJan Medala for (i = 0; i < count; i++) { 14201daff526SJakub Palider uint16_t next_to_use_masked = next_to_use & ring_mask; 142179405ee1SRafal Kozik struct rte_mbuf *mbuf = mbufs[i]; 14221173fca2SJan Medala struct ena_com_buf ebuf; 14231173fca2SJan Medala 142479405ee1SRafal Kozik if (likely((i + 4) < count)) 142579405ee1SRafal Kozik rte_prefetch0(mbufs[i + 4]); 1426c2034976SMichal Krawczyk 1427c2034976SMichal Krawczyk req_id = rxq->empty_rx_reqs[next_to_use_masked]; 1428241da076SRafal Kozik rc = validate_rx_req_id(rxq, req_id); 1429241da076SRafal Kozik if (unlikely(rc < 0)) 1430241da076SRafal Kozik break; 143179405ee1SRafal Kozik rxq->rx_buffer_info[req_id] = mbuf; 1432241da076SRafal Kozik 14331173fca2SJan Medala /* prepare physical address for DMA transaction */ 1434455da545SSantosh Shukla ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 14351173fca2SJan Medala ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 14361173fca2SJan Medala /* pass resource to device */ 14371173fca2SJan Medala rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, 1438c2034976SMichal Krawczyk &ebuf, req_id); 14391173fca2SJan Medala if (unlikely(rc)) { 14401173fca2SJan Medala RTE_LOG(WARNING, PMD, "failed adding rx desc\n"); 144179405ee1SRafal Kozik rxq->rx_buffer_info[req_id] = NULL; 14421173fca2SJan Medala break; 14431173fca2SJan Medala } 14441daff526SJakub Palider next_to_use++; 14451173fca2SJan Medala } 14461173fca2SJan Medala 144779405ee1SRafal Kozik if (unlikely(i < count)) { 1448241da076SRafal Kozik RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d " 1449241da076SRafal Kozik "buffers (from %d)\n", rxq->id, i, count); 145079405ee1SRafal Kozik rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]), 145179405ee1SRafal Kozik count - i); 14527830e905SSolganik Alexander ++rxq->rx_stats.refill_partial; 145379405ee1SRafal Kozik } 1454241da076SRafal Kozik 14555e02e19eSJan Medala /* When we submitted free recources to device... */ 14563d19e1abSRafal Kozik if (likely(i > 0)) { 1457241da076SRafal Kozik /* ...let HW know that it can fill buffers with data 1458241da076SRafal Kozik * 1459241da076SRafal Kozik * Add memory barrier to make sure the desc were written before 1460241da076SRafal Kozik * issue a doorbell 1461241da076SRafal Kozik */ 14621173fca2SJan Medala rte_wmb(); 14631173fca2SJan Medala ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 14641173fca2SJan Medala 14655e02e19eSJan Medala rxq->next_to_use = next_to_use; 14665e02e19eSJan Medala } 14675e02e19eSJan Medala 14681173fca2SJan Medala return i; 14691173fca2SJan Medala } 14701173fca2SJan Medala 14711173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev, 1472e859d2b8SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx, 1473e859d2b8SRafal Kozik bool *wd_state) 14741173fca2SJan Medala { 1475ca148440SMichal Krawczyk uint32_t aenq_groups; 14761173fca2SJan Medala int rc; 1477c4144557SJan Medala bool readless_supported; 14781173fca2SJan Medala 14791173fca2SJan Medala /* Initialize mmio registers */ 14801173fca2SJan Medala rc = ena_com_mmio_reg_read_request_init(ena_dev); 14811173fca2SJan Medala if (rc) { 14821173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to init mmio read less\n"); 14831173fca2SJan Medala return rc; 14841173fca2SJan Medala } 14851173fca2SJan Medala 1486c4144557SJan Medala /* The PCIe configuration space revision id indicate if mmio reg 1487c4144557SJan Medala * read is disabled. 1488c4144557SJan Medala */ 1489c4144557SJan Medala readless_supported = 1490c4144557SJan Medala !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id 1491c4144557SJan Medala & ENA_MMIO_DISABLE_REG_READ); 1492c4144557SJan Medala ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1493c4144557SJan Medala 14941173fca2SJan Medala /* reset device */ 14953adcba9aSMichal Krawczyk rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 14961173fca2SJan Medala if (rc) { 14971173fca2SJan Medala RTE_LOG(ERR, PMD, "cannot reset device\n"); 14981173fca2SJan Medala goto err_mmio_read_less; 14991173fca2SJan Medala } 15001173fca2SJan Medala 15011173fca2SJan Medala /* check FW version */ 15021173fca2SJan Medala rc = ena_com_validate_version(ena_dev); 15031173fca2SJan Medala if (rc) { 15041173fca2SJan Medala RTE_LOG(ERR, PMD, "device version is too low\n"); 15051173fca2SJan Medala goto err_mmio_read_less; 15061173fca2SJan Medala } 15071173fca2SJan Medala 15081173fca2SJan Medala ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 15091173fca2SJan Medala 15101173fca2SJan Medala /* ENA device administration layer init */ 1511b68309beSRafal Kozik rc = ena_com_admin_init(ena_dev, &aenq_handlers); 15121173fca2SJan Medala if (rc) { 15131173fca2SJan Medala RTE_LOG(ERR, PMD, 15141173fca2SJan Medala "cannot initialize ena admin queue with device\n"); 15151173fca2SJan Medala goto err_mmio_read_less; 15161173fca2SJan Medala } 15171173fca2SJan Medala 15181173fca2SJan Medala /* To enable the msix interrupts the driver needs to know the number 15191173fca2SJan Medala * of queues. So the driver uses polling mode to retrieve this 15201173fca2SJan Medala * information. 15211173fca2SJan Medala */ 15221173fca2SJan Medala ena_com_set_admin_polling_mode(ena_dev, true); 15231173fca2SJan Medala 1524201ff2e5SJakub Palider ena_config_host_info(ena_dev); 1525201ff2e5SJakub Palider 15261173fca2SJan Medala /* Get Device Attributes and features */ 15271173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 15281173fca2SJan Medala if (rc) { 15291173fca2SJan Medala RTE_LOG(ERR, PMD, 15301173fca2SJan Medala "cannot get attribute for ena device rc= %d\n", rc); 15311173fca2SJan Medala goto err_admin_init; 15321173fca2SJan Medala } 15331173fca2SJan Medala 1534f01f060cSRafal Kozik aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1535d9b8b106SMichal Krawczyk BIT(ENA_ADMIN_NOTIFICATION) | 1536983cce2dSRafal Kozik BIT(ENA_ADMIN_KEEP_ALIVE) | 1537983cce2dSRafal Kozik BIT(ENA_ADMIN_FATAL_ERROR) | 1538983cce2dSRafal Kozik BIT(ENA_ADMIN_WARNING); 1539ca148440SMichal Krawczyk 1540ca148440SMichal Krawczyk aenq_groups &= get_feat_ctx->aenq.supported_groups; 1541ca148440SMichal Krawczyk rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1542ca148440SMichal Krawczyk if (rc) { 1543ca148440SMichal Krawczyk RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc); 1544ca148440SMichal Krawczyk goto err_admin_init; 1545ca148440SMichal Krawczyk } 1546ca148440SMichal Krawczyk 1547e859d2b8SRafal Kozik *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1548e859d2b8SRafal Kozik 15491173fca2SJan Medala return 0; 15501173fca2SJan Medala 15511173fca2SJan Medala err_admin_init: 15521173fca2SJan Medala ena_com_admin_destroy(ena_dev); 15531173fca2SJan Medala 15541173fca2SJan Medala err_mmio_read_less: 15551173fca2SJan Medala ena_com_mmio_reg_read_request_destroy(ena_dev); 15561173fca2SJan Medala 15571173fca2SJan Medala return rc; 15581173fca2SJan Medala } 15591173fca2SJan Medala 1560ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg) 156115773e06SMichal Krawczyk { 156215773e06SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)cb_arg; 156315773e06SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 156415773e06SMichal Krawczyk 156515773e06SMichal Krawczyk ena_com_admin_q_comp_intr_handler(ena_dev); 15663d19e1abSRafal Kozik if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1567ca148440SMichal Krawczyk ena_com_aenq_intr_handler(ena_dev, adapter); 156815773e06SMichal Krawczyk } 156915773e06SMichal Krawczyk 15705efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter) 15715efb9fc7SMichal Krawczyk { 1572e859d2b8SRafal Kozik if (!adapter->wd_state) 1573e859d2b8SRafal Kozik return; 1574e859d2b8SRafal Kozik 15755efb9fc7SMichal Krawczyk if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 15765efb9fc7SMichal Krawczyk return; 15775efb9fc7SMichal Krawczyk 15785efb9fc7SMichal Krawczyk if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 15795efb9fc7SMichal Krawczyk adapter->keep_alive_timeout)) { 15805efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Keep alive timeout\n"); 15815efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 15825efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 15837830e905SSolganik Alexander ++adapter->dev_stats.wd_expired; 15845efb9fc7SMichal Krawczyk } 15855efb9fc7SMichal Krawczyk } 15865efb9fc7SMichal Krawczyk 15875efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */ 15885efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter) 15895efb9fc7SMichal Krawczyk { 15905efb9fc7SMichal Krawczyk if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 15915efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n"); 15925efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 15935efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 15945efb9fc7SMichal Krawczyk } 15955efb9fc7SMichal Krawczyk } 15965efb9fc7SMichal Krawczyk 1597d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1598d9b8b106SMichal Krawczyk void *arg) 1599d9b8b106SMichal Krawczyk { 1600d9b8b106SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)arg; 1601d9b8b106SMichal Krawczyk struct rte_eth_dev *dev = adapter->rte_dev; 1602d9b8b106SMichal Krawczyk 16035efb9fc7SMichal Krawczyk check_for_missing_keep_alive(adapter); 16045efb9fc7SMichal Krawczyk check_for_admin_com_state(adapter); 1605d9b8b106SMichal Krawczyk 16065efb9fc7SMichal Krawczyk if (unlikely(adapter->trigger_reset)) { 16075efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Trigger reset is on\n"); 1608d9b8b106SMichal Krawczyk _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1609d9b8b106SMichal Krawczyk NULL); 1610d9b8b106SMichal Krawczyk } 1611d9b8b106SMichal Krawczyk } 1612d9b8b106SMichal Krawczyk 16132fca2a98SMichal Krawczyk static inline void 16142fca2a98SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config) 16152fca2a98SMichal Krawczyk { 16162fca2a98SMichal Krawczyk llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 16172fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 16182fca2a98SMichal Krawczyk llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 16192fca2a98SMichal Krawczyk llq_config->llq_num_decs_before_header = 16202fca2a98SMichal Krawczyk ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 16212fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size_value = 128; 16222fca2a98SMichal Krawczyk } 16232fca2a98SMichal Krawczyk 16242fca2a98SMichal Krawczyk static int 16252fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter, 16262fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev, 16272fca2a98SMichal Krawczyk struct ena_admin_feature_llq_desc *llq, 16282fca2a98SMichal Krawczyk struct ena_llq_configurations *llq_default_configurations) 16292fca2a98SMichal Krawczyk { 16302fca2a98SMichal Krawczyk int rc; 16312fca2a98SMichal Krawczyk u32 llq_feature_mask; 16322fca2a98SMichal Krawczyk 16332fca2a98SMichal Krawczyk llq_feature_mask = 1 << ENA_ADMIN_LLQ; 16342fca2a98SMichal Krawczyk if (!(ena_dev->supported_features & llq_feature_mask)) { 16352fca2a98SMichal Krawczyk RTE_LOG(INFO, PMD, 16362fca2a98SMichal Krawczyk "LLQ is not supported. Fallback to host mode policy.\n"); 16372fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16382fca2a98SMichal Krawczyk return 0; 16392fca2a98SMichal Krawczyk } 16402fca2a98SMichal Krawczyk 16412fca2a98SMichal Krawczyk rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 16422fca2a98SMichal Krawczyk if (unlikely(rc)) { 16432fca2a98SMichal Krawczyk PMD_INIT_LOG(WARNING, "Failed to config dev mode. " 1644498c687aSRafal Kozik "Fallback to host mode policy."); 16452fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16462fca2a98SMichal Krawczyk return 0; 16472fca2a98SMichal Krawczyk } 16482fca2a98SMichal Krawczyk 16492fca2a98SMichal Krawczyk /* Nothing to config, exit */ 16502fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 16512fca2a98SMichal Krawczyk return 0; 16522fca2a98SMichal Krawczyk 16532fca2a98SMichal Krawczyk if (!adapter->dev_mem_base) { 16542fca2a98SMichal Krawczyk RTE_LOG(ERR, PMD, "Unable to access LLQ bar resource. " 16552fca2a98SMichal Krawczyk "Fallback to host mode policy.\n."); 16562fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16572fca2a98SMichal Krawczyk return 0; 16582fca2a98SMichal Krawczyk } 16592fca2a98SMichal Krawczyk 16602fca2a98SMichal Krawczyk ena_dev->mem_bar = adapter->dev_mem_base; 16612fca2a98SMichal Krawczyk 16622fca2a98SMichal Krawczyk return 0; 16632fca2a98SMichal Krawczyk } 16642fca2a98SMichal Krawczyk 1665ea93d37eSRafal Kozik static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev, 166601bd6877SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx) 166701bd6877SRafal Kozik { 16682fca2a98SMichal Krawczyk uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; 166901bd6877SRafal Kozik 1670ea93d37eSRafal Kozik /* Regular queues capabilities */ 1671ea93d37eSRafal Kozik if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1672ea93d37eSRafal Kozik struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1673ea93d37eSRafal Kozik &get_feat_ctx->max_queue_ext.max_queue_ext; 16742fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 16752fca2a98SMichal Krawczyk max_queue_ext->max_rx_cq_num); 16762fca2a98SMichal Krawczyk io_tx_sq_num = max_queue_ext->max_tx_sq_num; 16772fca2a98SMichal Krawczyk io_tx_cq_num = max_queue_ext->max_tx_cq_num; 1678ea93d37eSRafal Kozik } else { 1679ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 1680ea93d37eSRafal Kozik &get_feat_ctx->max_queues; 16812fca2a98SMichal Krawczyk io_tx_sq_num = max_queues->max_sq_num; 16822fca2a98SMichal Krawczyk io_tx_cq_num = max_queues->max_cq_num; 16832fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 1684ea93d37eSRafal Kozik } 168501bd6877SRafal Kozik 16862fca2a98SMichal Krawczyk /* In case of LLQ use the llq number in the get feature cmd */ 16872fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 16882fca2a98SMichal Krawczyk io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 16892fca2a98SMichal Krawczyk 169043d9610eSMichal Krawczyk io_queue_num = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 16912fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num); 16922fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num); 169301bd6877SRafal Kozik 169401bd6877SRafal Kozik if (unlikely(io_queue_num == 0)) { 169501bd6877SRafal Kozik RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n"); 169601bd6877SRafal Kozik return -EFAULT; 169701bd6877SRafal Kozik } 169801bd6877SRafal Kozik 169901bd6877SRafal Kozik return io_queue_num; 170001bd6877SRafal Kozik } 170101bd6877SRafal Kozik 17021173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 17031173fca2SJan Medala { 1704ea93d37eSRafal Kozik struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 17051173fca2SJan Medala struct rte_pci_device *pci_dev; 1706eb0ef49dSMichal Krawczyk struct rte_intr_handle *intr_handle; 17071173fca2SJan Medala struct ena_adapter *adapter = 17081173fca2SJan Medala (struct ena_adapter *)(eth_dev->data->dev_private); 17091173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 17101173fca2SJan Medala struct ena_com_dev_get_features_ctx get_feat_ctx; 17112fca2a98SMichal Krawczyk struct ena_llq_configurations llq_config; 17122fca2a98SMichal Krawczyk const char *queue_type_str; 1713ea93d37eSRafal Kozik int rc; 17141173fca2SJan Medala 17151173fca2SJan Medala static int adapters_found; 1716e859d2b8SRafal Kozik bool wd_state; 17171173fca2SJan Medala 17181173fca2SJan Medala eth_dev->dev_ops = &ena_dev_ops; 17191173fca2SJan Medala eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 17201173fca2SJan Medala eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 1721b3fc5a1aSKonstantin Ananyev eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 17221173fca2SJan Medala 17231173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 17241173fca2SJan Medala return 0; 17251173fca2SJan Medala 1726*fd976890SMichal Krawczyk memset(adapter, 0, sizeof(struct ena_adapter)); 1727*fd976890SMichal Krawczyk ena_dev = &adapter->ena_dev; 1728*fd976890SMichal Krawczyk 1729*fd976890SMichal Krawczyk adapter->rte_eth_dev_data = eth_dev->data; 1730*fd976890SMichal Krawczyk adapter->rte_dev = eth_dev; 1731*fd976890SMichal Krawczyk 1732c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 17331173fca2SJan Medala adapter->pdev = pci_dev; 17341173fca2SJan Medala 1735f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 17361173fca2SJan Medala pci_dev->addr.domain, 17371173fca2SJan Medala pci_dev->addr.bus, 17381173fca2SJan Medala pci_dev->addr.devid, 17391173fca2SJan Medala pci_dev->addr.function); 17401173fca2SJan Medala 1741eb0ef49dSMichal Krawczyk intr_handle = &pci_dev->intr_handle; 1742eb0ef49dSMichal Krawczyk 17431173fca2SJan Medala adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 17441173fca2SJan Medala adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 17451173fca2SJan Medala 17461d339597SRafal Kozik if (!adapter->regs) { 1747f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 17481173fca2SJan Medala ENA_REGS_BAR); 17491d339597SRafal Kozik return -ENXIO; 17501d339597SRafal Kozik } 17511173fca2SJan Medala 17521173fca2SJan Medala ena_dev->reg_bar = adapter->regs; 17531173fca2SJan Medala ena_dev->dmadev = adapter->pdev; 17541173fca2SJan Medala 17551173fca2SJan Medala adapter->id_number = adapters_found; 17561173fca2SJan Medala 17571173fca2SJan Medala snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 17581173fca2SJan Medala adapter->id_number); 17591173fca2SJan Medala 17601173fca2SJan Medala /* device specific initialization routine */ 1761e859d2b8SRafal Kozik rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 17621173fca2SJan Medala if (rc) { 1763f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1764241da076SRafal Kozik goto err; 17651173fca2SJan Medala } 1766e859d2b8SRafal Kozik adapter->wd_state = wd_state; 17671173fca2SJan Medala 17682fca2a98SMichal Krawczyk set_default_llq_configurations(&llq_config); 17692fca2a98SMichal Krawczyk rc = ena_set_queues_placement_policy(adapter, ena_dev, 17702fca2a98SMichal Krawczyk &get_feat_ctx.llq, &llq_config); 17712fca2a98SMichal Krawczyk if (unlikely(rc)) { 17722fca2a98SMichal Krawczyk PMD_INIT_LOG(CRIT, "Failed to set placement policy"); 17732fca2a98SMichal Krawczyk return rc; 17742fca2a98SMichal Krawczyk } 17752fca2a98SMichal Krawczyk 17762fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 17772fca2a98SMichal Krawczyk queue_type_str = "Regular"; 17782fca2a98SMichal Krawczyk else 17792fca2a98SMichal Krawczyk queue_type_str = "Low latency"; 17802fca2a98SMichal Krawczyk RTE_LOG(INFO, PMD, "Placement policy: %s\n", queue_type_str); 1781ea93d37eSRafal Kozik 1782ea93d37eSRafal Kozik calc_queue_ctx.ena_dev = ena_dev; 1783ea93d37eSRafal Kozik calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 178401bd6877SRafal Kozik adapter->num_queues = ena_calc_io_queue_num(ena_dev, 178501bd6877SRafal Kozik &get_feat_ctx); 17861173fca2SJan Medala 1787ea93d37eSRafal Kozik rc = ena_calc_queue_size(&calc_queue_ctx); 1788ea93d37eSRafal Kozik if (unlikely((rc != 0) || (adapter->num_queues <= 0))) { 1789241da076SRafal Kozik rc = -EFAULT; 1790241da076SRafal Kozik goto err_device_destroy; 1791241da076SRafal Kozik } 17921173fca2SJan Medala 1793ea93d37eSRafal Kozik adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 1794ea93d37eSRafal Kozik adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 17951173fca2SJan Medala 1796ea93d37eSRafal Kozik adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 1797ea93d37eSRafal Kozik adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 17982061fe41SRafal Kozik 17991173fca2SJan Medala /* prepare ring structures */ 18001173fca2SJan Medala ena_init_rings(adapter); 18011173fca2SJan Medala 1802372c1af5SJan Medala ena_config_debug_area(adapter); 1803372c1af5SJan Medala 18041173fca2SJan Medala /* Set max MTU for this device */ 18051173fca2SJan Medala adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 18061173fca2SJan Medala 180783277a7cSJakub Palider /* set device support for TSO */ 180883277a7cSJakub Palider adapter->tso4_supported = get_feat_ctx.offload.tx & 180983277a7cSJakub Palider ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; 181083277a7cSJakub Palider 18111173fca2SJan Medala /* Copy MAC address and point DPDK to it */ 18121173fca2SJan Medala eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; 18131173fca2SJan Medala ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, 18141173fca2SJan Medala (struct ether_addr *)adapter->mac_addr); 18151173fca2SJan Medala 181615febafdSThomas Monjalon /* 181715febafdSThomas Monjalon * Pass the information to the rte_eth_dev_close() that it should also 181815febafdSThomas Monjalon * release the private port resources. 181915febafdSThomas Monjalon */ 182015febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 182115febafdSThomas Monjalon 18221173fca2SJan Medala adapter->drv_stats = rte_zmalloc("adapter stats", 18231173fca2SJan Medala sizeof(*adapter->drv_stats), 18241173fca2SJan Medala RTE_CACHE_LINE_SIZE); 18251173fca2SJan Medala if (!adapter->drv_stats) { 18261173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); 1827241da076SRafal Kozik rc = -ENOMEM; 1828241da076SRafal Kozik goto err_delete_debug_area; 18291173fca2SJan Medala } 18301173fca2SJan Medala 1831eb0ef49dSMichal Krawczyk rte_intr_callback_register(intr_handle, 1832eb0ef49dSMichal Krawczyk ena_interrupt_handler_rte, 1833eb0ef49dSMichal Krawczyk adapter); 1834eb0ef49dSMichal Krawczyk rte_intr_enable(intr_handle); 1835eb0ef49dSMichal Krawczyk ena_com_set_admin_polling_mode(ena_dev, false); 1836ca148440SMichal Krawczyk ena_com_admin_aenq_enable(ena_dev); 1837eb0ef49dSMichal Krawczyk 1838d9b8b106SMichal Krawczyk if (adapters_found == 0) 1839d9b8b106SMichal Krawczyk rte_timer_subsystem_init(); 1840d9b8b106SMichal Krawczyk rte_timer_init(&adapter->timer_wd); 1841d9b8b106SMichal Krawczyk 18421173fca2SJan Medala adapters_found++; 18431173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_INIT; 18441173fca2SJan Medala 18451173fca2SJan Medala return 0; 1846241da076SRafal Kozik 1847241da076SRafal Kozik err_delete_debug_area: 1848241da076SRafal Kozik ena_com_delete_debug_area(ena_dev); 1849241da076SRafal Kozik 1850241da076SRafal Kozik err_device_destroy: 1851241da076SRafal Kozik ena_com_delete_host_info(ena_dev); 1852241da076SRafal Kozik ena_com_admin_destroy(ena_dev); 1853241da076SRafal Kozik 1854241da076SRafal Kozik err: 1855241da076SRafal Kozik return rc; 18561173fca2SJan Medala } 18571173fca2SJan Medala 1858e457bc70SRafal Kozik static void ena_destroy_device(struct rte_eth_dev *eth_dev) 1859eb0ef49dSMichal Krawczyk { 1860eb0ef49dSMichal Krawczyk struct ena_adapter *adapter = 1861eb0ef49dSMichal Krawczyk (struct ena_adapter *)(eth_dev->data->dev_private); 1862e457bc70SRafal Kozik struct ena_com_dev *ena_dev = &adapter->ena_dev; 1863eb0ef49dSMichal Krawczyk 1864e457bc70SRafal Kozik if (adapter->state == ENA_ADAPTER_STATE_FREE) 1865e457bc70SRafal Kozik return; 1866e457bc70SRafal Kozik 1867e457bc70SRafal Kozik ena_com_set_admin_running_state(ena_dev, false); 1868eb0ef49dSMichal Krawczyk 1869eb0ef49dSMichal Krawczyk if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1870eb0ef49dSMichal Krawczyk ena_close(eth_dev); 1871eb0ef49dSMichal Krawczyk 1872e457bc70SRafal Kozik ena_com_delete_debug_area(ena_dev); 1873e457bc70SRafal Kozik ena_com_delete_host_info(ena_dev); 1874e457bc70SRafal Kozik 1875e457bc70SRafal Kozik ena_com_abort_admin_commands(ena_dev); 1876e457bc70SRafal Kozik ena_com_wait_for_abort_completion(ena_dev); 1877e457bc70SRafal Kozik ena_com_admin_destroy(ena_dev); 1878e457bc70SRafal Kozik ena_com_mmio_reg_read_request_destroy(ena_dev); 1879e457bc70SRafal Kozik 1880e457bc70SRafal Kozik adapter->state = ENA_ADAPTER_STATE_FREE; 1881e457bc70SRafal Kozik } 1882e457bc70SRafal Kozik 1883e457bc70SRafal Kozik static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1884e457bc70SRafal Kozik { 1885e457bc70SRafal Kozik if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1886e457bc70SRafal Kozik return 0; 1887e457bc70SRafal Kozik 1888e457bc70SRafal Kozik ena_destroy_device(eth_dev); 1889e457bc70SRafal Kozik 1890eb0ef49dSMichal Krawczyk eth_dev->dev_ops = NULL; 1891eb0ef49dSMichal Krawczyk eth_dev->rx_pkt_burst = NULL; 1892eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_burst = NULL; 1893eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_prepare = NULL; 1894eb0ef49dSMichal Krawczyk 1895eb0ef49dSMichal Krawczyk return 0; 1896eb0ef49dSMichal Krawczyk } 1897eb0ef49dSMichal Krawczyk 18981173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev) 18991173fca2SJan Medala { 19001173fca2SJan Medala struct ena_adapter *adapter = 19011173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 19027369f88fSRafal Kozik 19031173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_CONFIG; 19041173fca2SJan Medala 1905a4996bd8SWei Dai adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1906a4996bd8SWei Dai adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 19071173fca2SJan Medala return 0; 19081173fca2SJan Medala } 19091173fca2SJan Medala 19101173fca2SJan Medala static void ena_init_rings(struct ena_adapter *adapter) 19111173fca2SJan Medala { 19121173fca2SJan Medala int i; 19131173fca2SJan Medala 19141173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 19151173fca2SJan Medala struct ena_ring *ring = &adapter->tx_ring[i]; 19161173fca2SJan Medala 19171173fca2SJan Medala ring->configured = 0; 19181173fca2SJan Medala ring->type = ENA_RING_TYPE_TX; 19191173fca2SJan Medala ring->adapter = adapter; 19201173fca2SJan Medala ring->id = i; 19211173fca2SJan Medala ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 19221173fca2SJan Medala ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 19232061fe41SRafal Kozik ring->sgl_size = adapter->max_tx_sgl_size; 19241173fca2SJan Medala } 19251173fca2SJan Medala 19261173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 19271173fca2SJan Medala struct ena_ring *ring = &adapter->rx_ring[i]; 19281173fca2SJan Medala 19291173fca2SJan Medala ring->configured = 0; 19301173fca2SJan Medala ring->type = ENA_RING_TYPE_RX; 19311173fca2SJan Medala ring->adapter = adapter; 19321173fca2SJan Medala ring->id = i; 1933ea93d37eSRafal Kozik ring->sgl_size = adapter->max_rx_sgl_size; 19341173fca2SJan Medala } 19351173fca2SJan Medala } 19361173fca2SJan Medala 19371173fca2SJan Medala static void ena_infos_get(struct rte_eth_dev *dev, 19381173fca2SJan Medala struct rte_eth_dev_info *dev_info) 19391173fca2SJan Medala { 19401173fca2SJan Medala struct ena_adapter *adapter; 19411173fca2SJan Medala struct ena_com_dev *ena_dev; 19421173fca2SJan Medala struct ena_com_dev_get_features_ctx feat; 194356b8b9b7SRafal Kozik uint64_t rx_feat = 0, tx_feat = 0; 19441173fca2SJan Medala int rc = 0; 19451173fca2SJan Medala 1946498c687aSRafal Kozik ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1947498c687aSRafal Kozik ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 19481173fca2SJan Medala adapter = (struct ena_adapter *)(dev->data->dev_private); 19491173fca2SJan Medala 19501173fca2SJan Medala ena_dev = &adapter->ena_dev; 1951498c687aSRafal Kozik ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 19521173fca2SJan Medala 1953e274f573SMarc Sune dev_info->speed_capa = 1954e274f573SMarc Sune ETH_LINK_SPEED_1G | 1955e274f573SMarc Sune ETH_LINK_SPEED_2_5G | 1956e274f573SMarc Sune ETH_LINK_SPEED_5G | 1957e274f573SMarc Sune ETH_LINK_SPEED_10G | 1958e274f573SMarc Sune ETH_LINK_SPEED_25G | 1959e274f573SMarc Sune ETH_LINK_SPEED_40G | 1960b2feed01SThomas Monjalon ETH_LINK_SPEED_50G | 1961b2feed01SThomas Monjalon ETH_LINK_SPEED_100G; 1962e274f573SMarc Sune 19631173fca2SJan Medala /* Get supported features from HW */ 19641173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, &feat); 19651173fca2SJan Medala if (unlikely(rc)) { 19661173fca2SJan Medala RTE_LOG(ERR, PMD, 19671173fca2SJan Medala "Cannot get attribute for ena device rc= %d\n", rc); 19681173fca2SJan Medala return; 19691173fca2SJan Medala } 19701173fca2SJan Medala 19711173fca2SJan Medala /* Set Tx & Rx features available for device */ 19721173fca2SJan Medala if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 19731173fca2SJan Medala tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 19741173fca2SJan Medala 19751173fca2SJan Medala if (feat.offload.tx & 19761173fca2SJan Medala ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 19771173fca2SJan Medala tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 19781173fca2SJan Medala DEV_TX_OFFLOAD_UDP_CKSUM | 19791173fca2SJan Medala DEV_TX_OFFLOAD_TCP_CKSUM; 19801173fca2SJan Medala 19814eea092bSJakub Palider if (feat.offload.rx_supported & 19821173fca2SJan Medala ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 19831173fca2SJan Medala rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 19841173fca2SJan Medala DEV_RX_OFFLOAD_UDP_CKSUM | 19851173fca2SJan Medala DEV_RX_OFFLOAD_TCP_CKSUM; 19861173fca2SJan Medala 1987a0a4ff40SRafal Kozik rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 1988a0a4ff40SRafal Kozik 19891173fca2SJan Medala /* Inform framework about available features */ 19901173fca2SJan Medala dev_info->rx_offload_capa = rx_feat; 19917369f88fSRafal Kozik dev_info->rx_queue_offload_capa = rx_feat; 19921173fca2SJan Medala dev_info->tx_offload_capa = tx_feat; 199356b8b9b7SRafal Kozik dev_info->tx_queue_offload_capa = tx_feat; 19941173fca2SJan Medala 1995b01ead20SRafal Kozik dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP | 1996b01ead20SRafal Kozik ETH_RSS_UDP; 1997b01ead20SRafal Kozik 19981173fca2SJan Medala dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 19991173fca2SJan Medala dev_info->max_rx_pktlen = adapter->max_mtu; 20001173fca2SJan Medala dev_info->max_mac_addrs = 1; 20011173fca2SJan Medala 20021173fca2SJan Medala dev_info->max_rx_queues = adapter->num_queues; 20031173fca2SJan Medala dev_info->max_tx_queues = adapter->num_queues; 20041173fca2SJan Medala dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 200556b8b9b7SRafal Kozik 200656b8b9b7SRafal Kozik adapter->tx_supported_offloads = tx_feat; 20077369f88fSRafal Kozik adapter->rx_supported_offloads = rx_feat; 200892680dc2SRafal Kozik 2009ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size; 201092680dc2SRafal Kozik dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2011ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2012ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 2013ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2014ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 201592680dc2SRafal Kozik 2016ea93d37eSRafal Kozik dev_info->tx_desc_lim.nb_max = adapter->tx_ring_size; 201792680dc2SRafal Kozik dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 201892680dc2SRafal Kozik dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2019ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 202092680dc2SRafal Kozik dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2021ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 20221173fca2SJan Medala } 20231173fca2SJan Medala 20241173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 20251173fca2SJan Medala uint16_t nb_pkts) 20261173fca2SJan Medala { 20271173fca2SJan Medala struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 20281173fca2SJan Medala unsigned int ring_size = rx_ring->ring_size; 20291173fca2SJan Medala unsigned int ring_mask = ring_size - 1; 20301173fca2SJan Medala uint16_t next_to_clean = rx_ring->next_to_clean; 20311daff526SJakub Palider uint16_t desc_in_use = 0; 2032c2034976SMichal Krawczyk uint16_t req_id; 20331173fca2SJan Medala unsigned int recv_idx = 0; 20341173fca2SJan Medala struct rte_mbuf *mbuf = NULL; 20351173fca2SJan Medala struct rte_mbuf *mbuf_head = NULL; 20361173fca2SJan Medala struct rte_mbuf *mbuf_prev = NULL; 20371173fca2SJan Medala struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; 20381173fca2SJan Medala unsigned int completed; 20391173fca2SJan Medala 20401173fca2SJan Medala struct ena_com_rx_ctx ena_rx_ctx; 20411173fca2SJan Medala int rc = 0; 20421173fca2SJan Medala 20431173fca2SJan Medala /* Check adapter state */ 20441173fca2SJan Medala if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 20451173fca2SJan Medala RTE_LOG(ALERT, PMD, 20461173fca2SJan Medala "Trying to receive pkts while device is NOT running\n"); 20471173fca2SJan Medala return 0; 20481173fca2SJan Medala } 20491173fca2SJan Medala 20501daff526SJakub Palider desc_in_use = rx_ring->next_to_use - next_to_clean; 20511173fca2SJan Medala if (unlikely(nb_pkts > desc_in_use)) 20521173fca2SJan Medala nb_pkts = desc_in_use; 20531173fca2SJan Medala 20541173fca2SJan Medala for (completed = 0; completed < nb_pkts; completed++) { 20551173fca2SJan Medala int segments = 0; 20561173fca2SJan Medala 2057ea93d37eSRafal Kozik ena_rx_ctx.max_bufs = rx_ring->sgl_size; 20581173fca2SJan Medala ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 20591173fca2SJan Medala ena_rx_ctx.descs = 0; 20601173fca2SJan Medala /* receive packet context */ 20611173fca2SJan Medala rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 20621173fca2SJan Medala rx_ring->ena_com_io_sq, 20631173fca2SJan Medala &ena_rx_ctx); 20641173fca2SJan Medala if (unlikely(rc)) { 20651173fca2SJan Medala RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); 20669b260dbfSRafal Kozik rx_ring->adapter->reset_reason = 20679b260dbfSRafal Kozik ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2068241da076SRafal Kozik rx_ring->adapter->trigger_reset = true; 20697830e905SSolganik Alexander ++rx_ring->rx_stats.bad_desc_num; 20701173fca2SJan Medala return 0; 20711173fca2SJan Medala } 20721173fca2SJan Medala 20731173fca2SJan Medala if (unlikely(ena_rx_ctx.descs == 0)) 20741173fca2SJan Medala break; 20751173fca2SJan Medala 20761173fca2SJan Medala while (segments < ena_rx_ctx.descs) { 2077c2034976SMichal Krawczyk req_id = ena_rx_ctx.ena_bufs[segments].req_id; 2078c2034976SMichal Krawczyk rc = validate_rx_req_id(rx_ring, req_id); 2079709b1dcbSRafal Kozik if (unlikely(rc)) { 2080709b1dcbSRafal Kozik if (segments != 0) 2081709b1dcbSRafal Kozik rte_mbuf_raw_free(mbuf_head); 2082c2034976SMichal Krawczyk break; 2083709b1dcbSRafal Kozik } 2084c2034976SMichal Krawczyk 2085c2034976SMichal Krawczyk mbuf = rx_buff_info[req_id]; 2086709b1dcbSRafal Kozik rx_buff_info[req_id] = NULL; 20871173fca2SJan Medala mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; 20881173fca2SJan Medala mbuf->data_off = RTE_PKTMBUF_HEADROOM; 20891173fca2SJan Medala mbuf->refcnt = 1; 20901173fca2SJan Medala mbuf->next = NULL; 20913d19e1abSRafal Kozik if (unlikely(segments == 0)) { 20921173fca2SJan Medala mbuf->nb_segs = ena_rx_ctx.descs; 20931173fca2SJan Medala mbuf->port = rx_ring->port_id; 20941173fca2SJan Medala mbuf->pkt_len = 0; 20951173fca2SJan Medala mbuf_head = mbuf; 20961173fca2SJan Medala } else { 20971173fca2SJan Medala /* for multi-segment pkts create mbuf chain */ 20981173fca2SJan Medala mbuf_prev->next = mbuf; 20991173fca2SJan Medala } 21001173fca2SJan Medala mbuf_head->pkt_len += mbuf->data_len; 21011173fca2SJan Medala 21021173fca2SJan Medala mbuf_prev = mbuf; 2103c2034976SMichal Krawczyk rx_ring->empty_rx_reqs[next_to_clean & ring_mask] = 2104c2034976SMichal Krawczyk req_id; 21051173fca2SJan Medala segments++; 21061daff526SJakub Palider next_to_clean++; 21071173fca2SJan Medala } 2108f00930d9SRafal Kozik if (unlikely(rc)) 2109f00930d9SRafal Kozik break; 21101173fca2SJan Medala 21111173fca2SJan Medala /* fill mbuf attributes if any */ 21121173fca2SJan Medala ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); 21137830e905SSolganik Alexander 21147830e905SSolganik Alexander if (unlikely(mbuf_head->ol_flags & 21157830e905SSolganik Alexander (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) 21167830e905SSolganik Alexander ++rx_ring->rx_stats.bad_csum; 21177830e905SSolganik Alexander 2118e5df9f33SStewart Allen mbuf_head->hash.rss = ena_rx_ctx.hash; 21191173fca2SJan Medala 21201173fca2SJan Medala /* pass to DPDK application head mbuf */ 21211173fca2SJan Medala rx_pkts[recv_idx] = mbuf_head; 21221173fca2SJan Medala recv_idx++; 212345b6d861SMichal Krawczyk rx_ring->rx_stats.bytes += mbuf_head->pkt_len; 21241173fca2SJan Medala } 21251173fca2SJan Medala 212645b6d861SMichal Krawczyk rx_ring->rx_stats.cnt += recv_idx; 2127ec78af6bSMichal Krawczyk rx_ring->next_to_clean = next_to_clean; 2128ec78af6bSMichal Krawczyk 2129ec78af6bSMichal Krawczyk desc_in_use = desc_in_use - completed + 1; 21301173fca2SJan Medala /* Burst refill to save doorbells, memory barriers, const interval */ 2131a45462c5SRafal Kozik if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) { 2132a45462c5SRafal Kozik ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 21331daff526SJakub Palider ena_populate_rx_queue(rx_ring, ring_size - desc_in_use); 2134a45462c5SRafal Kozik } 21351173fca2SJan Medala 21361173fca2SJan Medala return recv_idx; 21371173fca2SJan Medala } 21381173fca2SJan Medala 2139b3fc5a1aSKonstantin Ananyev static uint16_t 214083277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2141b3fc5a1aSKonstantin Ananyev uint16_t nb_pkts) 2142b3fc5a1aSKonstantin Ananyev { 2143b3fc5a1aSKonstantin Ananyev int32_t ret; 2144b3fc5a1aSKonstantin Ananyev uint32_t i; 2145b3fc5a1aSKonstantin Ananyev struct rte_mbuf *m; 214683277a7cSJakub Palider struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 214783277a7cSJakub Palider struct ipv4_hdr *ip_hdr; 2148b3fc5a1aSKonstantin Ananyev uint64_t ol_flags; 214983277a7cSJakub Palider uint16_t frag_field; 215083277a7cSJakub Palider 2151b3fc5a1aSKonstantin Ananyev for (i = 0; i != nb_pkts; i++) { 2152b3fc5a1aSKonstantin Ananyev m = tx_pkts[i]; 2153b3fc5a1aSKonstantin Ananyev ol_flags = m->ol_flags; 2154b3fc5a1aSKonstantin Ananyev 2155bc5ef57dSMichal Krawczyk if (!(ol_flags & PKT_TX_IPV4)) 2156bc5ef57dSMichal Krawczyk continue; 2157bc5ef57dSMichal Krawczyk 2158bc5ef57dSMichal Krawczyk /* If there was not L2 header length specified, assume it is 2159bc5ef57dSMichal Krawczyk * length of the ethernet header. 2160bc5ef57dSMichal Krawczyk */ 2161bc5ef57dSMichal Krawczyk if (unlikely(m->l2_len == 0)) 2162bc5ef57dSMichal Krawczyk m->l2_len = sizeof(struct ether_hdr); 2163bc5ef57dSMichal Krawczyk 2164bc5ef57dSMichal Krawczyk ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, 2165bc5ef57dSMichal Krawczyk m->l2_len); 2166bc5ef57dSMichal Krawczyk frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2167bc5ef57dSMichal Krawczyk 2168bc5ef57dSMichal Krawczyk if ((frag_field & IPV4_HDR_DF_FLAG) != 0) { 2169bc5ef57dSMichal Krawczyk m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2170bc5ef57dSMichal Krawczyk 2171bc5ef57dSMichal Krawczyk /* If IPv4 header has DF flag enabled and TSO support is 2172bc5ef57dSMichal Krawczyk * disabled, partial chcecksum should not be calculated. 2173bc5ef57dSMichal Krawczyk */ 2174bc5ef57dSMichal Krawczyk if (!tx_ring->adapter->tso4_supported) 2175bc5ef57dSMichal Krawczyk continue; 2176bc5ef57dSMichal Krawczyk } 2177bc5ef57dSMichal Krawczyk 2178b3fc5a1aSKonstantin Ananyev if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 || 2179b3fc5a1aSKonstantin Ananyev (ol_flags & PKT_TX_L4_MASK) == 2180b3fc5a1aSKonstantin Ananyev PKT_TX_SCTP_CKSUM) { 2181b3fc5a1aSKonstantin Ananyev rte_errno = -ENOTSUP; 2182b3fc5a1aSKonstantin Ananyev return i; 2183b3fc5a1aSKonstantin Ananyev } 2184b3fc5a1aSKonstantin Ananyev 2185b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2186b3fc5a1aSKonstantin Ananyev ret = rte_validate_tx_offload(m); 2187b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2188b3fc5a1aSKonstantin Ananyev rte_errno = ret; 2189b3fc5a1aSKonstantin Ananyev return i; 2190b3fc5a1aSKonstantin Ananyev } 2191b3fc5a1aSKonstantin Ananyev #endif 219283277a7cSJakub Palider 219383277a7cSJakub Palider /* In case we are supposed to TSO and have DF not set (DF=0) 219483277a7cSJakub Palider * hardware must be provided with partial checksum, otherwise 219583277a7cSJakub Palider * it will take care of necessary calculations. 219683277a7cSJakub Palider */ 219783277a7cSJakub Palider 2198b3fc5a1aSKonstantin Ananyev ret = rte_net_intel_cksum_flags_prepare(m, 2199b3fc5a1aSKonstantin Ananyev ol_flags & ~PKT_TX_TCP_SEG); 2200b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2201b3fc5a1aSKonstantin Ananyev rte_errno = ret; 2202b3fc5a1aSKonstantin Ananyev return i; 2203b3fc5a1aSKonstantin Ananyev } 2204b3fc5a1aSKonstantin Ananyev } 2205b3fc5a1aSKonstantin Ananyev 2206b3fc5a1aSKonstantin Ananyev return i; 2207b3fc5a1aSKonstantin Ananyev } 2208b3fc5a1aSKonstantin Ananyev 2209f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter, 2210f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints) 2211f01f060cSRafal Kozik { 2212f01f060cSRafal Kozik if (hints->admin_completion_tx_timeout) 2213f01f060cSRafal Kozik adapter->ena_dev.admin_queue.completion_timeout = 2214f01f060cSRafal Kozik hints->admin_completion_tx_timeout * 1000; 2215f01f060cSRafal Kozik 2216f01f060cSRafal Kozik if (hints->mmio_read_timeout) 2217f01f060cSRafal Kozik /* convert to usec */ 2218f01f060cSRafal Kozik adapter->ena_dev.mmio_read.reg_read_to = 2219f01f060cSRafal Kozik hints->mmio_read_timeout * 1000; 2220d9b8b106SMichal Krawczyk 2221d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout) { 2222d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2223d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2224d9b8b106SMichal Krawczyk else 2225d9b8b106SMichal Krawczyk // Convert msecs to ticks 2226d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = 2227d9b8b106SMichal Krawczyk (hints->driver_watchdog_timeout * 2228d9b8b106SMichal Krawczyk rte_get_timer_hz()) / 1000; 2229d9b8b106SMichal Krawczyk } 2230f01f060cSRafal Kozik } 2231f01f060cSRafal Kozik 22322061fe41SRafal Kozik static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, 22332061fe41SRafal Kozik struct rte_mbuf *mbuf) 22342061fe41SRafal Kozik { 22352fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev; 22362fca2a98SMichal Krawczyk int num_segments, header_len, rc; 22372061fe41SRafal Kozik 22382fca2a98SMichal Krawczyk ena_dev = &tx_ring->adapter->ena_dev; 22392061fe41SRafal Kozik num_segments = mbuf->nb_segs; 22402fca2a98SMichal Krawczyk header_len = mbuf->data_len; 22412061fe41SRafal Kozik 22422061fe41SRafal Kozik if (likely(num_segments < tx_ring->sgl_size)) 22432061fe41SRafal Kozik return 0; 22442061fe41SRafal Kozik 22452fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 22462fca2a98SMichal Krawczyk (num_segments == tx_ring->sgl_size) && 22472fca2a98SMichal Krawczyk (header_len < tx_ring->tx_max_header_size)) 22482fca2a98SMichal Krawczyk return 0; 22492fca2a98SMichal Krawczyk 22507830e905SSolganik Alexander ++tx_ring->tx_stats.linearize; 22512061fe41SRafal Kozik rc = rte_pktmbuf_linearize(mbuf); 22527830e905SSolganik Alexander if (unlikely(rc)) { 22532061fe41SRafal Kozik RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n"); 22547830e905SSolganik Alexander rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 22557830e905SSolganik Alexander ++tx_ring->tx_stats.linearize_failed; 22567830e905SSolganik Alexander return rc; 22577830e905SSolganik Alexander } 22582061fe41SRafal Kozik 22592061fe41SRafal Kozik return rc; 22602061fe41SRafal Kozik } 22612061fe41SRafal Kozik 22621173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 22631173fca2SJan Medala uint16_t nb_pkts) 22641173fca2SJan Medala { 22651173fca2SJan Medala struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 22661daff526SJakub Palider uint16_t next_to_use = tx_ring->next_to_use; 22671daff526SJakub Palider uint16_t next_to_clean = tx_ring->next_to_clean; 22681173fca2SJan Medala struct rte_mbuf *mbuf; 22692fca2a98SMichal Krawczyk uint16_t seg_len; 22701173fca2SJan Medala unsigned int ring_size = tx_ring->ring_size; 22711173fca2SJan Medala unsigned int ring_mask = ring_size - 1; 22721173fca2SJan Medala struct ena_com_tx_ctx ena_tx_ctx; 22731173fca2SJan Medala struct ena_tx_buffer *tx_info; 22741173fca2SJan Medala struct ena_com_buf *ebuf; 22751173fca2SJan Medala uint16_t rc, req_id, total_tx_descs = 0; 2276b66b6e72SJakub Palider uint16_t sent_idx = 0, empty_tx_reqs; 22772fca2a98SMichal Krawczyk uint16_t push_len = 0; 22782fca2a98SMichal Krawczyk uint16_t delta = 0; 22791173fca2SJan Medala int nb_hw_desc; 228045b6d861SMichal Krawczyk uint32_t total_length; 22811173fca2SJan Medala 22821173fca2SJan Medala /* Check adapter state */ 22831173fca2SJan Medala if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 22841173fca2SJan Medala RTE_LOG(ALERT, PMD, 22851173fca2SJan Medala "Trying to xmit pkts while device is NOT running\n"); 22861173fca2SJan Medala return 0; 22871173fca2SJan Medala } 22881173fca2SJan Medala 2289b66b6e72SJakub Palider empty_tx_reqs = ring_size - (next_to_use - next_to_clean); 2290b66b6e72SJakub Palider if (nb_pkts > empty_tx_reqs) 2291b66b6e72SJakub Palider nb_pkts = empty_tx_reqs; 2292b66b6e72SJakub Palider 22931173fca2SJan Medala for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 22941173fca2SJan Medala mbuf = tx_pkts[sent_idx]; 229545b6d861SMichal Krawczyk total_length = 0; 22961173fca2SJan Medala 22972061fe41SRafal Kozik rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); 22982061fe41SRafal Kozik if (unlikely(rc)) 22992061fe41SRafal Kozik break; 23002061fe41SRafal Kozik 23011daff526SJakub Palider req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; 23021173fca2SJan Medala tx_info = &tx_ring->tx_buffer_info[req_id]; 23031173fca2SJan Medala tx_info->mbuf = mbuf; 23041173fca2SJan Medala tx_info->num_of_bufs = 0; 23051173fca2SJan Medala ebuf = tx_info->bufs; 23061173fca2SJan Medala 23071173fca2SJan Medala /* Prepare TX context */ 23081173fca2SJan Medala memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 23091173fca2SJan Medala memset(&ena_tx_ctx.ena_meta, 0x0, 23101173fca2SJan Medala sizeof(struct ena_com_tx_meta)); 23111173fca2SJan Medala ena_tx_ctx.ena_bufs = ebuf; 23121173fca2SJan Medala ena_tx_ctx.req_id = req_id; 23132fca2a98SMichal Krawczyk 23142fca2a98SMichal Krawczyk delta = 0; 23152fca2a98SMichal Krawczyk seg_len = mbuf->data_len; 23162fca2a98SMichal Krawczyk 23171173fca2SJan Medala if (tx_ring->tx_mem_queue_type == 23181173fca2SJan Medala ENA_ADMIN_PLACEMENT_POLICY_DEV) { 23192fca2a98SMichal Krawczyk push_len = RTE_MIN(mbuf->pkt_len, 23201173fca2SJan Medala tx_ring->tx_max_header_size); 23212fca2a98SMichal Krawczyk ena_tx_ctx.header_len = push_len; 23222fca2a98SMichal Krawczyk 23232fca2a98SMichal Krawczyk if (likely(push_len <= seg_len)) { 23242fca2a98SMichal Krawczyk /* If the push header is in the single segment, 23252fca2a98SMichal Krawczyk * then just point it to the 1st mbuf data. 23262fca2a98SMichal Krawczyk */ 23271173fca2SJan Medala ena_tx_ctx.push_header = 23282fca2a98SMichal Krawczyk rte_pktmbuf_mtod(mbuf, uint8_t *); 23292fca2a98SMichal Krawczyk } else { 23302fca2a98SMichal Krawczyk /* If the push header lays in the several 23312fca2a98SMichal Krawczyk * segments, copy it to the intermediate buffer. 23322fca2a98SMichal Krawczyk */ 23332fca2a98SMichal Krawczyk rte_pktmbuf_read(mbuf, 0, push_len, 23342fca2a98SMichal Krawczyk tx_ring->push_buf_intermediate_buf); 23352fca2a98SMichal Krawczyk ena_tx_ctx.push_header = 23362fca2a98SMichal Krawczyk tx_ring->push_buf_intermediate_buf; 23372fca2a98SMichal Krawczyk delta = push_len - seg_len; 23382fca2a98SMichal Krawczyk } 23391173fca2SJan Medala } /* there's no else as we take advantage of memset zeroing */ 23401173fca2SJan Medala 23411173fca2SJan Medala /* Set TX offloads flags, if applicable */ 234256b8b9b7SRafal Kozik ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads); 23431173fca2SJan Medala 23441173fca2SJan Medala if (unlikely(mbuf->ol_flags & 23451173fca2SJan Medala (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD))) 23461173fca2SJan Medala rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 23471173fca2SJan Medala 23481173fca2SJan Medala rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]); 23491173fca2SJan Medala 23501173fca2SJan Medala /* Process first segment taking into 23511173fca2SJan Medala * consideration pushed header 23521173fca2SJan Medala */ 23532fca2a98SMichal Krawczyk if (seg_len > push_len) { 2354455da545SSantosh Shukla ebuf->paddr = mbuf->buf_iova + 23551173fca2SJan Medala mbuf->data_off + 23562fca2a98SMichal Krawczyk push_len; 23572fca2a98SMichal Krawczyk ebuf->len = seg_len - push_len; 23581173fca2SJan Medala ebuf++; 23591173fca2SJan Medala tx_info->num_of_bufs++; 23601173fca2SJan Medala } 236145b6d861SMichal Krawczyk total_length += mbuf->data_len; 23621173fca2SJan Medala 23631173fca2SJan Medala while ((mbuf = mbuf->next) != NULL) { 23642fca2a98SMichal Krawczyk seg_len = mbuf->data_len; 23652fca2a98SMichal Krawczyk 23662fca2a98SMichal Krawczyk /* Skip mbufs if whole data is pushed as a header */ 23672fca2a98SMichal Krawczyk if (unlikely(delta > seg_len)) { 23682fca2a98SMichal Krawczyk delta -= seg_len; 23692fca2a98SMichal Krawczyk continue; 23702fca2a98SMichal Krawczyk } 23712fca2a98SMichal Krawczyk 23722fca2a98SMichal Krawczyk ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 23732fca2a98SMichal Krawczyk ebuf->len = seg_len - delta; 237445b6d861SMichal Krawczyk total_length += ebuf->len; 23751173fca2SJan Medala ebuf++; 23761173fca2SJan Medala tx_info->num_of_bufs++; 23772fca2a98SMichal Krawczyk 23782fca2a98SMichal Krawczyk delta = 0; 23791173fca2SJan Medala } 23801173fca2SJan Medala 23811173fca2SJan Medala ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 23821173fca2SJan Medala 2383c7519ea5SRafal Kozik if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 2384c7519ea5SRafal Kozik &ena_tx_ctx)) { 2385c7519ea5SRafal Kozik RTE_LOG(DEBUG, PMD, "llq tx max burst size of queue %d" 2386c7519ea5SRafal Kozik " achieved, writing doorbell to send burst\n", 2387c7519ea5SRafal Kozik tx_ring->id); 2388c7519ea5SRafal Kozik rte_wmb(); 2389c7519ea5SRafal Kozik ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2390c7519ea5SRafal Kozik } 2391c7519ea5SRafal Kozik 2392c7519ea5SRafal Kozik /* prepare the packet's descriptors to dma engine */ 23931173fca2SJan Medala rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, 23941173fca2SJan Medala &ena_tx_ctx, &nb_hw_desc); 23957830e905SSolganik Alexander if (unlikely(rc)) { 23967830e905SSolganik Alexander ++tx_ring->tx_stats.prepare_ctx_err; 23971173fca2SJan Medala break; 23987830e905SSolganik Alexander } 23991173fca2SJan Medala tx_info->tx_descs = nb_hw_desc; 24001173fca2SJan Medala 24011daff526SJakub Palider next_to_use++; 240245b6d861SMichal Krawczyk tx_ring->tx_stats.cnt += tx_info->num_of_bufs; 240345b6d861SMichal Krawczyk tx_ring->tx_stats.bytes += total_length; 24041173fca2SJan Medala } 24057830e905SSolganik Alexander tx_ring->tx_stats.available_desc = 24067830e905SSolganik Alexander ena_com_free_desc(tx_ring->ena_com_io_sq); 24071173fca2SJan Medala 24085e02e19eSJan Medala /* If there are ready packets to be xmitted... */ 24095e02e19eSJan Medala if (sent_idx > 0) { 24105e02e19eSJan Medala /* ...let HW do its best :-) */ 24111173fca2SJan Medala rte_wmb(); 24121173fca2SJan Medala ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 241345b6d861SMichal Krawczyk tx_ring->tx_stats.doorbells++; 24145e02e19eSJan Medala tx_ring->next_to_use = next_to_use; 24155e02e19eSJan Medala } 24165e02e19eSJan Medala 24171173fca2SJan Medala /* Clear complete packets */ 24181173fca2SJan Medala while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { 2419f7d82d24SRafal Kozik rc = validate_tx_req_id(tx_ring, req_id); 2420f7d82d24SRafal Kozik if (rc) 2421f7d82d24SRafal Kozik break; 2422f7d82d24SRafal Kozik 24231173fca2SJan Medala /* Get Tx info & store how many descs were processed */ 24241173fca2SJan Medala tx_info = &tx_ring->tx_buffer_info[req_id]; 24251173fca2SJan Medala total_tx_descs += tx_info->tx_descs; 24261173fca2SJan Medala 24271173fca2SJan Medala /* Free whole mbuf chain */ 24281173fca2SJan Medala mbuf = tx_info->mbuf; 24291173fca2SJan Medala rte_pktmbuf_free(mbuf); 2430207a514cSMichal Krawczyk tx_info->mbuf = NULL; 24311173fca2SJan Medala 24321173fca2SJan Medala /* Put back descriptor to the ring for reuse */ 24331daff526SJakub Palider tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id; 24341daff526SJakub Palider next_to_clean++; 24351173fca2SJan Medala 24361173fca2SJan Medala /* If too many descs to clean, leave it for another run */ 24371173fca2SJan Medala if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size))) 24381173fca2SJan Medala break; 24391173fca2SJan Medala } 24407830e905SSolganik Alexander tx_ring->tx_stats.available_desc = 24417830e905SSolganik Alexander ena_com_free_desc(tx_ring->ena_com_io_sq); 24421173fca2SJan Medala 24435e02e19eSJan Medala if (total_tx_descs > 0) { 24441173fca2SJan Medala /* acknowledge completion of sent packets */ 24451daff526SJakub Palider tx_ring->next_to_clean = next_to_clean; 2446a45462c5SRafal Kozik ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 2447a45462c5SRafal Kozik ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 24485e02e19eSJan Medala } 24495e02e19eSJan Medala 24507830e905SSolganik Alexander tx_ring->tx_stats.tx_poll++; 24517830e905SSolganik Alexander 24521173fca2SJan Medala return sent_idx; 24531173fca2SJan Medala } 24541173fca2SJan Medala 24557830e905SSolganik Alexander /** 24567830e905SSolganik Alexander * DPDK callback to retrieve names of extended device statistics 24577830e905SSolganik Alexander * 24587830e905SSolganik Alexander * @param dev 24597830e905SSolganik Alexander * Pointer to Ethernet device structure. 24607830e905SSolganik Alexander * @param[out] xstats_names 24617830e905SSolganik Alexander * Buffer to insert names into. 24627830e905SSolganik Alexander * @param n 24637830e905SSolganik Alexander * Number of names. 24647830e905SSolganik Alexander * 24657830e905SSolganik Alexander * @return 24667830e905SSolganik Alexander * Number of xstats names. 24677830e905SSolganik Alexander */ 24687830e905SSolganik Alexander static int ena_xstats_get_names(struct rte_eth_dev *dev, 24697830e905SSolganik Alexander struct rte_eth_xstat_name *xstats_names, 24707830e905SSolganik Alexander unsigned int n) 24717830e905SSolganik Alexander { 24727830e905SSolganik Alexander unsigned int xstats_count = ena_xstats_calc_num(dev); 24737830e905SSolganik Alexander unsigned int stat, i, count = 0; 24747830e905SSolganik Alexander 24757830e905SSolganik Alexander if (n < xstats_count || !xstats_names) 24767830e905SSolganik Alexander return xstats_count; 24777830e905SSolganik Alexander 24787830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 24797830e905SSolganik Alexander strcpy(xstats_names[count].name, 24807830e905SSolganik Alexander ena_stats_global_strings[stat].name); 24817830e905SSolganik Alexander 24827830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 24837830e905SSolganik Alexander for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 24847830e905SSolganik Alexander snprintf(xstats_names[count].name, 24857830e905SSolganik Alexander sizeof(xstats_names[count].name), 24867830e905SSolganik Alexander "rx_q%d_%s", i, 24877830e905SSolganik Alexander ena_stats_rx_strings[stat].name); 24887830e905SSolganik Alexander 24897830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 24907830e905SSolganik Alexander for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 24917830e905SSolganik Alexander snprintf(xstats_names[count].name, 24927830e905SSolganik Alexander sizeof(xstats_names[count].name), 24937830e905SSolganik Alexander "tx_q%d_%s", i, 24947830e905SSolganik Alexander ena_stats_tx_strings[stat].name); 24957830e905SSolganik Alexander 24967830e905SSolganik Alexander return xstats_count; 24977830e905SSolganik Alexander } 24987830e905SSolganik Alexander 24997830e905SSolganik Alexander /** 25007830e905SSolganik Alexander * DPDK callback to get extended device statistics. 25017830e905SSolganik Alexander * 25027830e905SSolganik Alexander * @param dev 25037830e905SSolganik Alexander * Pointer to Ethernet device structure. 25047830e905SSolganik Alexander * @param[out] stats 25057830e905SSolganik Alexander * Stats table output buffer. 25067830e905SSolganik Alexander * @param n 25077830e905SSolganik Alexander * The size of the stats table. 25087830e905SSolganik Alexander * 25097830e905SSolganik Alexander * @return 25107830e905SSolganik Alexander * Number of xstats on success, negative on failure. 25117830e905SSolganik Alexander */ 25127830e905SSolganik Alexander static int ena_xstats_get(struct rte_eth_dev *dev, 25137830e905SSolganik Alexander struct rte_eth_xstat *xstats, 25147830e905SSolganik Alexander unsigned int n) 25157830e905SSolganik Alexander { 25167830e905SSolganik Alexander struct ena_adapter *adapter = 25177830e905SSolganik Alexander (struct ena_adapter *)(dev->data->dev_private); 25187830e905SSolganik Alexander unsigned int xstats_count = ena_xstats_calc_num(dev); 25197830e905SSolganik Alexander unsigned int stat, i, count = 0; 25207830e905SSolganik Alexander int stat_offset; 25217830e905SSolganik Alexander void *stats_begin; 25227830e905SSolganik Alexander 25237830e905SSolganik Alexander if (n < xstats_count) 25247830e905SSolganik Alexander return xstats_count; 25257830e905SSolganik Alexander 25267830e905SSolganik Alexander if (!xstats) 25277830e905SSolganik Alexander return 0; 25287830e905SSolganik Alexander 25297830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 25307830e905SSolganik Alexander stat_offset = ena_stats_rx_strings[stat].stat_offset; 25317830e905SSolganik Alexander stats_begin = &adapter->dev_stats; 25327830e905SSolganik Alexander 25337830e905SSolganik Alexander xstats[count].id = count; 25347830e905SSolganik Alexander xstats[count].value = *((uint64_t *) 25357830e905SSolganik Alexander ((char *)stats_begin + stat_offset)); 25367830e905SSolganik Alexander } 25377830e905SSolganik Alexander 25387830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) { 25397830e905SSolganik Alexander for (i = 0; i < dev->data->nb_rx_queues; i++, count++) { 25407830e905SSolganik Alexander stat_offset = ena_stats_rx_strings[stat].stat_offset; 25417830e905SSolganik Alexander stats_begin = &adapter->rx_ring[i].rx_stats; 25427830e905SSolganik Alexander 25437830e905SSolganik Alexander xstats[count].id = count; 25447830e905SSolganik Alexander xstats[count].value = *((uint64_t *) 25457830e905SSolganik Alexander ((char *)stats_begin + stat_offset)); 25467830e905SSolganik Alexander } 25477830e905SSolganik Alexander } 25487830e905SSolganik Alexander 25497830e905SSolganik Alexander for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) { 25507830e905SSolganik Alexander for (i = 0; i < dev->data->nb_tx_queues; i++, count++) { 25517830e905SSolganik Alexander stat_offset = ena_stats_tx_strings[stat].stat_offset; 25527830e905SSolganik Alexander stats_begin = &adapter->tx_ring[i].rx_stats; 25537830e905SSolganik Alexander 25547830e905SSolganik Alexander xstats[count].id = count; 25557830e905SSolganik Alexander xstats[count].value = *((uint64_t *) 25567830e905SSolganik Alexander ((char *)stats_begin + stat_offset)); 25577830e905SSolganik Alexander } 25587830e905SSolganik Alexander } 25597830e905SSolganik Alexander 25607830e905SSolganik Alexander return count; 25617830e905SSolganik Alexander } 25627830e905SSolganik Alexander 25637830e905SSolganik Alexander static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 25647830e905SSolganik Alexander const uint64_t *ids, 25657830e905SSolganik Alexander uint64_t *values, 25667830e905SSolganik Alexander unsigned int n) 25677830e905SSolganik Alexander { 25687830e905SSolganik Alexander struct ena_adapter *adapter = 25697830e905SSolganik Alexander (struct ena_adapter *)(dev->data->dev_private); 25707830e905SSolganik Alexander uint64_t id; 25717830e905SSolganik Alexander uint64_t rx_entries, tx_entries; 25727830e905SSolganik Alexander unsigned int i; 25737830e905SSolganik Alexander int qid; 25747830e905SSolganik Alexander int valid = 0; 25757830e905SSolganik Alexander for (i = 0; i < n; ++i) { 25767830e905SSolganik Alexander id = ids[i]; 25777830e905SSolganik Alexander /* Check if id belongs to global statistics */ 25787830e905SSolganik Alexander if (id < ENA_STATS_ARRAY_GLOBAL) { 25797830e905SSolganik Alexander values[i] = *((uint64_t *)&adapter->dev_stats + id); 25807830e905SSolganik Alexander ++valid; 25817830e905SSolganik Alexander continue; 25827830e905SSolganik Alexander } 25837830e905SSolganik Alexander 25847830e905SSolganik Alexander /* Check if id belongs to rx queue statistics */ 25857830e905SSolganik Alexander id -= ENA_STATS_ARRAY_GLOBAL; 25867830e905SSolganik Alexander rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues; 25877830e905SSolganik Alexander if (id < rx_entries) { 25887830e905SSolganik Alexander qid = id % dev->data->nb_rx_queues; 25897830e905SSolganik Alexander id /= dev->data->nb_rx_queues; 25907830e905SSolganik Alexander values[i] = *((uint64_t *) 25917830e905SSolganik Alexander &adapter->rx_ring[qid].rx_stats + id); 25927830e905SSolganik Alexander ++valid; 25937830e905SSolganik Alexander continue; 25947830e905SSolganik Alexander } 25957830e905SSolganik Alexander /* Check if id belongs to rx queue statistics */ 25967830e905SSolganik Alexander id -= rx_entries; 25977830e905SSolganik Alexander tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues; 25987830e905SSolganik Alexander if (id < tx_entries) { 25997830e905SSolganik Alexander qid = id % dev->data->nb_tx_queues; 26007830e905SSolganik Alexander id /= dev->data->nb_tx_queues; 26017830e905SSolganik Alexander values[i] = *((uint64_t *) 26027830e905SSolganik Alexander &adapter->tx_ring[qid].tx_stats + id); 26037830e905SSolganik Alexander ++valid; 26047830e905SSolganik Alexander continue; 26057830e905SSolganik Alexander } 26067830e905SSolganik Alexander } 26077830e905SSolganik Alexander 26087830e905SSolganik Alexander return valid; 26097830e905SSolganik Alexander } 26107830e905SSolganik Alexander 2611ca148440SMichal Krawczyk /********************************************************************* 2612ca148440SMichal Krawczyk * PMD configuration 2613ca148440SMichal Krawczyk *********************************************************************/ 2614fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2615fdf91e0fSJan Blunck struct rte_pci_device *pci_dev) 2616fdf91e0fSJan Blunck { 2617fdf91e0fSJan Blunck return rte_eth_dev_pci_generic_probe(pci_dev, 2618fdf91e0fSJan Blunck sizeof(struct ena_adapter), eth_ena_dev_init); 2619fdf91e0fSJan Blunck } 2620fdf91e0fSJan Blunck 2621fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 2622fdf91e0fSJan Blunck { 2623eb0ef49dSMichal Krawczyk return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 2624fdf91e0fSJan Blunck } 2625fdf91e0fSJan Blunck 2626fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = { 26271173fca2SJan Medala .id_table = pci_id_ena_map, 262805e0eee0SRafal Kozik .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 262905e0eee0SRafal Kozik RTE_PCI_DRV_WC_ACTIVATE, 2630fdf91e0fSJan Blunck .probe = eth_ena_pci_probe, 2631fdf91e0fSJan Blunck .remove = eth_ena_pci_remove, 26321173fca2SJan Medala }; 26331173fca2SJan Medala 2634fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 263501f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 263606e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 26378bc0acaeSStephen Hemminger 2638f8e99896SThomas Monjalon RTE_INIT(ena_init_log) 26398bc0acaeSStephen Hemminger { 26403f111952SHarry van Haaren ena_logtype_init = rte_log_register("pmd.net.ena.init"); 26418bc0acaeSStephen Hemminger if (ena_logtype_init >= 0) 26428bc0acaeSStephen Hemminger rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE); 26433f111952SHarry van Haaren ena_logtype_driver = rte_log_register("pmd.net.ena.driver"); 26448bc0acaeSStephen Hemminger if (ena_logtype_driver >= 0) 26458bc0acaeSStephen Hemminger rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE); 26468bc0acaeSStephen Hemminger } 26473adcba9aSMichal Krawczyk 26483adcba9aSMichal Krawczyk /****************************************************************************** 26493adcba9aSMichal Krawczyk ******************************** AENQ Handlers ******************************* 26503adcba9aSMichal Krawczyk *****************************************************************************/ 2651ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data, 2652ca148440SMichal Krawczyk struct ena_admin_aenq_entry *aenq_e) 2653ca148440SMichal Krawczyk { 2654ca148440SMichal Krawczyk struct rte_eth_dev *eth_dev; 2655ca148440SMichal Krawczyk struct ena_adapter *adapter; 2656ca148440SMichal Krawczyk struct ena_admin_aenq_link_change_desc *aenq_link_desc; 2657ca148440SMichal Krawczyk uint32_t status; 2658ca148440SMichal Krawczyk 2659ca148440SMichal Krawczyk adapter = (struct ena_adapter *)adapter_data; 2660ca148440SMichal Krawczyk aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 2661ca148440SMichal Krawczyk eth_dev = adapter->rte_dev; 2662ca148440SMichal Krawczyk 2663ca148440SMichal Krawczyk status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 2664ca148440SMichal Krawczyk adapter->link_status = status; 2665ca148440SMichal Krawczyk 2666ca148440SMichal Krawczyk ena_link_update(eth_dev, 0); 2667ca148440SMichal Krawczyk _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 2668ca148440SMichal Krawczyk } 2669ca148440SMichal Krawczyk 2670f01f060cSRafal Kozik static void ena_notification(void *data, 2671f01f060cSRafal Kozik struct ena_admin_aenq_entry *aenq_e) 2672f01f060cSRafal Kozik { 2673f01f060cSRafal Kozik struct ena_adapter *adapter = (struct ena_adapter *)data; 2674f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints; 2675f01f060cSRafal Kozik 2676f01f060cSRafal Kozik if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 2677f01f060cSRafal Kozik RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n", 2678f01f060cSRafal Kozik aenq_e->aenq_common_desc.group, 2679f01f060cSRafal Kozik ENA_ADMIN_NOTIFICATION); 2680f01f060cSRafal Kozik 2681f01f060cSRafal Kozik switch (aenq_e->aenq_common_desc.syndrom) { 2682f01f060cSRafal Kozik case ENA_ADMIN_UPDATE_HINTS: 2683f01f060cSRafal Kozik hints = (struct ena_admin_ena_hw_hints *) 2684f01f060cSRafal Kozik (&aenq_e->inline_data_w4); 2685f01f060cSRafal Kozik ena_update_hints(adapter, hints); 2686f01f060cSRafal Kozik break; 2687f01f060cSRafal Kozik default: 2688f01f060cSRafal Kozik RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n", 2689f01f060cSRafal Kozik aenq_e->aenq_common_desc.syndrom); 2690f01f060cSRafal Kozik } 2691f01f060cSRafal Kozik } 2692f01f060cSRafal Kozik 2693d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data, 2694d9b8b106SMichal Krawczyk __rte_unused struct ena_admin_aenq_entry *aenq_e) 2695d9b8b106SMichal Krawczyk { 2696d9b8b106SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 269794c3e376SRafal Kozik struct ena_admin_aenq_keep_alive_desc *desc; 269894c3e376SRafal Kozik uint64_t rx_drops; 2699d9b8b106SMichal Krawczyk 2700d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles(); 270194c3e376SRafal Kozik 270294c3e376SRafal Kozik desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 270394c3e376SRafal Kozik rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 270494c3e376SRafal Kozik rte_atomic64_set(&adapter->drv_stats->rx_drops, rx_drops); 2705d9b8b106SMichal Krawczyk } 2706d9b8b106SMichal Krawczyk 27073adcba9aSMichal Krawczyk /** 27083adcba9aSMichal Krawczyk * This handler will called for unknown event group or unimplemented handlers 27093adcba9aSMichal Krawczyk **/ 27103adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data, 27113adcba9aSMichal Krawczyk __rte_unused struct ena_admin_aenq_entry *aenq_e) 27123adcba9aSMichal Krawczyk { 2713983cce2dSRafal Kozik RTE_LOG(ERR, PMD, "Unknown event was received or event with " 2714983cce2dSRafal Kozik "unimplemented handler\n"); 27153adcba9aSMichal Krawczyk } 27163adcba9aSMichal Krawczyk 2717ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = { 27183adcba9aSMichal Krawczyk .handlers = { 2719ca148440SMichal Krawczyk [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 2720f01f060cSRafal Kozik [ENA_ADMIN_NOTIFICATION] = ena_notification, 2721d9b8b106SMichal Krawczyk [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive 27223adcba9aSMichal Krawczyk }, 27233adcba9aSMichal Krawczyk .unimplemented_handler = unimplemented_aenq_handler 27243adcba9aSMichal Krawczyk }; 2725