11173fca2SJan Medala /*- 21173fca2SJan Medala * BSD LICENSE 31173fca2SJan Medala * 41173fca2SJan Medala * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. 51173fca2SJan Medala * All rights reserved. 61173fca2SJan Medala * 71173fca2SJan Medala * Redistribution and use in source and binary forms, with or without 81173fca2SJan Medala * modification, are permitted provided that the following conditions 91173fca2SJan Medala * are met: 101173fca2SJan Medala * 111173fca2SJan Medala * * Redistributions of source code must retain the above copyright 121173fca2SJan Medala * notice, this list of conditions and the following disclaimer. 131173fca2SJan Medala * * Redistributions in binary form must reproduce the above copyright 141173fca2SJan Medala * notice, this list of conditions and the following disclaimer in 151173fca2SJan Medala * the documentation and/or other materials provided with the 161173fca2SJan Medala * distribution. 171173fca2SJan Medala * * Neither the name of copyright holder nor the names of its 181173fca2SJan Medala * contributors may be used to endorse or promote products derived 191173fca2SJan Medala * from this software without specific prior written permission. 201173fca2SJan Medala * 211173fca2SJan Medala * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 221173fca2SJan Medala * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 231173fca2SJan Medala * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 241173fca2SJan Medala * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 251173fca2SJan Medala * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 261173fca2SJan Medala * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 271173fca2SJan Medala * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 281173fca2SJan Medala * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 291173fca2SJan Medala * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 301173fca2SJan Medala * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 311173fca2SJan Medala * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 321173fca2SJan Medala */ 331173fca2SJan Medala 341173fca2SJan Medala #include <rte_ether.h> 35ffc905f3SFerruh Yigit #include <rte_ethdev_driver.h> 36fdf91e0fSJan Blunck #include <rte_ethdev_pci.h> 371173fca2SJan Medala #include <rte_tcp.h> 381173fca2SJan Medala #include <rte_atomic.h> 391173fca2SJan Medala #include <rte_dev.h> 401173fca2SJan Medala #include <rte_errno.h> 41372c1af5SJan Medala #include <rte_version.h> 423d3edc26SJan Medala #include <rte_eal_memconfig.h> 43b3fc5a1aSKonstantin Ananyev #include <rte_net.h> 441173fca2SJan Medala 451173fca2SJan Medala #include "ena_ethdev.h" 461173fca2SJan Medala #include "ena_logs.h" 471173fca2SJan Medala #include "ena_platform.h" 481173fca2SJan Medala #include "ena_com.h" 491173fca2SJan Medala #include "ena_eth_com.h" 501173fca2SJan Medala 511173fca2SJan Medala #include <ena_common_defs.h> 521173fca2SJan Medala #include <ena_regs_defs.h> 531173fca2SJan Medala #include <ena_admin_defs.h> 541173fca2SJan Medala #include <ena_eth_io_defs.h> 551173fca2SJan Medala 56372c1af5SJan Medala #define DRV_MODULE_VER_MAJOR 1 5768a48ef2SMichal Krawczyk #define DRV_MODULE_VER_MINOR 1 58372c1af5SJan Medala #define DRV_MODULE_VER_SUBMINOR 0 59372c1af5SJan Medala 601173fca2SJan Medala #define ENA_IO_TXQ_IDX(q) (2 * (q)) 611173fca2SJan Medala #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) 621173fca2SJan Medala /*reverse version of ENA_IO_RXQ_IDX*/ 631173fca2SJan Medala #define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2) 641173fca2SJan Medala 651173fca2SJan Medala /* While processing submitted and completed descriptors (rx and tx path 661173fca2SJan Medala * respectively) in a loop it is desired to: 671173fca2SJan Medala * - perform batch submissions while populating sumbissmion queue 681173fca2SJan Medala * - avoid blocking transmission of other packets during cleanup phase 691173fca2SJan Medala * Hence the utilization ratio of 1/8 of a queue size. 701173fca2SJan Medala */ 711173fca2SJan Medala #define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8) 721173fca2SJan Medala 731173fca2SJan Medala #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) 741173fca2SJan Medala #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) 751173fca2SJan Medala 761173fca2SJan Medala #define GET_L4_HDR_LEN(mbuf) \ 771173fca2SJan Medala ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \ 781173fca2SJan Medala mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) 791173fca2SJan Medala 801173fca2SJan Medala #define ENA_RX_RSS_TABLE_LOG_SIZE 7 811173fca2SJan Medala #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) 821173fca2SJan Medala #define ENA_HASH_KEY_SIZE 40 83372c1af5SJan Medala #define ENA_ETH_SS_STATS 0xFF 84372c1af5SJan Medala #define ETH_GSTRING_LEN 32 85372c1af5SJan Medala 86372c1af5SJan Medala #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 87372c1af5SJan Medala 8892680dc2SRafal Kozik #define ENA_MAX_RING_DESC ENA_DEFAULT_RING_SIZE 8992680dc2SRafal Kozik #define ENA_MIN_RING_DESC 128 9092680dc2SRafal Kozik 91372c1af5SJan Medala enum ethtool_stringset { 92372c1af5SJan Medala ETH_SS_TEST = 0, 93372c1af5SJan Medala ETH_SS_STATS, 94372c1af5SJan Medala }; 95372c1af5SJan Medala 96372c1af5SJan Medala struct ena_stats { 97372c1af5SJan Medala char name[ETH_GSTRING_LEN]; 98372c1af5SJan Medala int stat_offset; 99372c1af5SJan Medala }; 100372c1af5SJan Medala 101372c1af5SJan Medala #define ENA_STAT_ENA_COM_ENTRY(stat) { \ 102372c1af5SJan Medala .name = #stat, \ 103372c1af5SJan Medala .stat_offset = offsetof(struct ena_com_stats_admin, stat) \ 104372c1af5SJan Medala } 105372c1af5SJan Medala 106372c1af5SJan Medala #define ENA_STAT_ENTRY(stat, stat_type) { \ 107372c1af5SJan Medala .name = #stat, \ 108372c1af5SJan Medala .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ 109372c1af5SJan Medala } 110372c1af5SJan Medala 111372c1af5SJan Medala #define ENA_STAT_RX_ENTRY(stat) \ 112372c1af5SJan Medala ENA_STAT_ENTRY(stat, rx) 113372c1af5SJan Medala 114372c1af5SJan Medala #define ENA_STAT_TX_ENTRY(stat) \ 115372c1af5SJan Medala ENA_STAT_ENTRY(stat, tx) 116372c1af5SJan Medala 117372c1af5SJan Medala #define ENA_STAT_GLOBAL_ENTRY(stat) \ 118372c1af5SJan Medala ENA_STAT_ENTRY(stat, dev) 119372c1af5SJan Medala 1203adcba9aSMichal Krawczyk /* 1213adcba9aSMichal Krawczyk * Each rte_memzone should have unique name. 1223adcba9aSMichal Krawczyk * To satisfy it, count number of allocation and add it to name. 1233adcba9aSMichal Krawczyk */ 1243adcba9aSMichal Krawczyk uint32_t ena_alloc_cnt; 1253adcba9aSMichal Krawczyk 126372c1af5SJan Medala static const struct ena_stats ena_stats_global_strings[] = { 127372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(tx_timeout), 128372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(io_suspend), 129372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(io_resume), 130372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(wd_expired), 131372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(interface_up), 132372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(interface_down), 133372c1af5SJan Medala ENA_STAT_GLOBAL_ENTRY(admin_q_pause), 134372c1af5SJan Medala }; 135372c1af5SJan Medala 136372c1af5SJan Medala static const struct ena_stats ena_stats_tx_strings[] = { 137372c1af5SJan Medala ENA_STAT_TX_ENTRY(cnt), 138372c1af5SJan Medala ENA_STAT_TX_ENTRY(bytes), 139372c1af5SJan Medala ENA_STAT_TX_ENTRY(queue_stop), 140372c1af5SJan Medala ENA_STAT_TX_ENTRY(queue_wakeup), 141372c1af5SJan Medala ENA_STAT_TX_ENTRY(dma_mapping_err), 142372c1af5SJan Medala ENA_STAT_TX_ENTRY(linearize), 143372c1af5SJan Medala ENA_STAT_TX_ENTRY(linearize_failed), 144372c1af5SJan Medala ENA_STAT_TX_ENTRY(tx_poll), 145372c1af5SJan Medala ENA_STAT_TX_ENTRY(doorbells), 146372c1af5SJan Medala ENA_STAT_TX_ENTRY(prepare_ctx_err), 147372c1af5SJan Medala ENA_STAT_TX_ENTRY(missing_tx_comp), 148372c1af5SJan Medala ENA_STAT_TX_ENTRY(bad_req_id), 149372c1af5SJan Medala }; 150372c1af5SJan Medala 151372c1af5SJan Medala static const struct ena_stats ena_stats_rx_strings[] = { 152372c1af5SJan Medala ENA_STAT_RX_ENTRY(cnt), 153372c1af5SJan Medala ENA_STAT_RX_ENTRY(bytes), 154372c1af5SJan Medala ENA_STAT_RX_ENTRY(refil_partial), 155372c1af5SJan Medala ENA_STAT_RX_ENTRY(bad_csum), 156372c1af5SJan Medala ENA_STAT_RX_ENTRY(page_alloc_fail), 157372c1af5SJan Medala ENA_STAT_RX_ENTRY(skb_alloc_fail), 158372c1af5SJan Medala ENA_STAT_RX_ENTRY(dma_mapping_err), 159372c1af5SJan Medala ENA_STAT_RX_ENTRY(bad_desc_num), 160372c1af5SJan Medala ENA_STAT_RX_ENTRY(small_copy_len_pkt), 161372c1af5SJan Medala }; 162372c1af5SJan Medala 163372c1af5SJan Medala static const struct ena_stats ena_stats_ena_com_strings[] = { 164372c1af5SJan Medala ENA_STAT_ENA_COM_ENTRY(aborted_cmd), 165372c1af5SJan Medala ENA_STAT_ENA_COM_ENTRY(submitted_cmd), 166372c1af5SJan Medala ENA_STAT_ENA_COM_ENTRY(completed_cmd), 167372c1af5SJan Medala ENA_STAT_ENA_COM_ENTRY(out_of_space), 168372c1af5SJan Medala ENA_STAT_ENA_COM_ENTRY(no_completion), 169372c1af5SJan Medala }; 170372c1af5SJan Medala 171372c1af5SJan Medala #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 172372c1af5SJan Medala #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 173372c1af5SJan Medala #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 174372c1af5SJan Medala #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) 1751173fca2SJan Medala 17656b8b9b7SRafal Kozik #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ 17756b8b9b7SRafal Kozik DEV_TX_OFFLOAD_UDP_CKSUM |\ 17856b8b9b7SRafal Kozik DEV_TX_OFFLOAD_IPV4_CKSUM |\ 17956b8b9b7SRafal Kozik DEV_TX_OFFLOAD_TCP_TSO) 18056b8b9b7SRafal Kozik #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ 18156b8b9b7SRafal Kozik PKT_TX_IP_CKSUM |\ 18256b8b9b7SRafal Kozik PKT_TX_TCP_SEG) 18356b8b9b7SRafal Kozik 1841173fca2SJan Medala /** Vendor ID used by Amazon devices */ 1851173fca2SJan Medala #define PCI_VENDOR_ID_AMAZON 0x1D0F 1861173fca2SJan Medala /** Amazon devices */ 1871173fca2SJan Medala #define PCI_DEVICE_ID_ENA_VF 0xEC20 1881173fca2SJan Medala #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 1891173fca2SJan Medala 190b3fc5a1aSKonstantin Ananyev #define ENA_TX_OFFLOAD_MASK (\ 191b3fc5a1aSKonstantin Ananyev PKT_TX_L4_MASK | \ 192b3fc5a1aSKonstantin Ananyev PKT_TX_IP_CKSUM | \ 193b3fc5a1aSKonstantin Ananyev PKT_TX_TCP_SEG) 194b3fc5a1aSKonstantin Ananyev 195b3fc5a1aSKonstantin Ananyev #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 196b3fc5a1aSKonstantin Ananyev (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 197b3fc5a1aSKonstantin Ananyev 1988bc0acaeSStephen Hemminger int ena_logtype_init; 1998bc0acaeSStephen Hemminger int ena_logtype_driver; 2008bc0acaeSStephen Hemminger 20128a1fd4fSFerruh Yigit static const struct rte_pci_id pci_id_ena_map[] = { 202cb990571SDavid Marchand { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 203cb990571SDavid Marchand { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, 2041173fca2SJan Medala { .device_id = 0 }, 2051173fca2SJan Medala }; 2061173fca2SJan Medala 207ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers; 2083adcba9aSMichal Krawczyk 2091173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev, 210e859d2b8SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx, 211e859d2b8SRafal Kozik bool *wd_state); 2121173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev); 2131173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2141173fca2SJan Medala uint16_t nb_pkts); 215b3fc5a1aSKonstantin Ananyev static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 216b3fc5a1aSKonstantin Ananyev uint16_t nb_pkts); 2171173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 2181173fca2SJan Medala uint16_t nb_desc, unsigned int socket_id, 2191173fca2SJan Medala const struct rte_eth_txconf *tx_conf); 2201173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 2211173fca2SJan Medala uint16_t nb_desc, unsigned int socket_id, 2221173fca2SJan Medala const struct rte_eth_rxconf *rx_conf, 2231173fca2SJan Medala struct rte_mempool *mp); 2241173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, 2251173fca2SJan Medala struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 2261173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 2271173fca2SJan Medala static void ena_init_rings(struct ena_adapter *adapter); 2281173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 2291173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev); 230eb0ef49dSMichal Krawczyk static void ena_stop(struct rte_eth_dev *dev); 2311173fca2SJan Medala static void ena_close(struct rte_eth_dev *dev); 2322081d5e2SMichal Krawczyk static int ena_dev_reset(struct rte_eth_dev *dev); 233d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 2341173fca2SJan Medala static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 2351173fca2SJan Medala static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 2361173fca2SJan Medala static void ena_rx_queue_release(void *queue); 2371173fca2SJan Medala static void ena_tx_queue_release(void *queue); 2381173fca2SJan Medala static void ena_rx_queue_release_bufs(struct ena_ring *ring); 2391173fca2SJan Medala static void ena_tx_queue_release_bufs(struct ena_ring *ring); 2401173fca2SJan Medala static int ena_link_update(struct rte_eth_dev *dev, 241dd2c630aSFerruh Yigit int wait_to_complete); 2421173fca2SJan Medala static int ena_queue_restart(struct ena_ring *ring); 2431173fca2SJan Medala static int ena_queue_restart_all(struct rte_eth_dev *dev, 2441173fca2SJan Medala enum ena_ring_type ring_type); 2451173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev); 246dd2c630aSFerruh Yigit static void ena_infos_get(struct rte_eth_dev *dev, 2471173fca2SJan Medala struct rte_eth_dev_info *dev_info); 2481173fca2SJan Medala static int ena_rss_reta_update(struct rte_eth_dev *dev, 2491173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 2501173fca2SJan Medala uint16_t reta_size); 2511173fca2SJan Medala static int ena_rss_reta_query(struct rte_eth_dev *dev, 2521173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 2531173fca2SJan Medala uint16_t reta_size); 254372c1af5SJan Medala static int ena_get_sset_count(struct rte_eth_dev *dev, int sset); 25515773e06SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg); 256d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 2571173fca2SJan Medala 258103ab18cSFerruh Yigit static const struct eth_dev_ops ena_dev_ops = { 2591173fca2SJan Medala .dev_configure = ena_dev_configure, 2601173fca2SJan Medala .dev_infos_get = ena_infos_get, 2611173fca2SJan Medala .rx_queue_setup = ena_rx_queue_setup, 2621173fca2SJan Medala .tx_queue_setup = ena_tx_queue_setup, 2631173fca2SJan Medala .dev_start = ena_start, 264eb0ef49dSMichal Krawczyk .dev_stop = ena_stop, 2651173fca2SJan Medala .link_update = ena_link_update, 2661173fca2SJan Medala .stats_get = ena_stats_get, 2671173fca2SJan Medala .mtu_set = ena_mtu_set, 2681173fca2SJan Medala .rx_queue_release = ena_rx_queue_release, 2691173fca2SJan Medala .tx_queue_release = ena_tx_queue_release, 2701173fca2SJan Medala .dev_close = ena_close, 2712081d5e2SMichal Krawczyk .dev_reset = ena_dev_reset, 2721173fca2SJan Medala .reta_update = ena_rss_reta_update, 2731173fca2SJan Medala .reta_query = ena_rss_reta_query, 2741173fca2SJan Medala }; 2751173fca2SJan Medala 2763d3edc26SJan Medala #define NUMA_NO_NODE SOCKET_ID_ANY 2773d3edc26SJan Medala 2783d3edc26SJan Medala static inline int ena_cpu_to_node(int cpu) 2793d3edc26SJan Medala { 2803d3edc26SJan Medala struct rte_config *config = rte_eal_get_configuration(); 28149df3db8SAnatoly Burakov struct rte_fbarray *arr = &config->mem_config->memzones; 28249df3db8SAnatoly Burakov const struct rte_memzone *mz; 2833d3edc26SJan Medala 28449df3db8SAnatoly Burakov if (unlikely(cpu >= RTE_MAX_MEMZONE)) 2853d3edc26SJan Medala return NUMA_NO_NODE; 28649df3db8SAnatoly Burakov 28749df3db8SAnatoly Burakov mz = rte_fbarray_get(arr, cpu); 28849df3db8SAnatoly Burakov 28949df3db8SAnatoly Burakov return mz->socket_id; 2903d3edc26SJan Medala } 2913d3edc26SJan Medala 2921173fca2SJan Medala static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 2931173fca2SJan Medala struct ena_com_rx_ctx *ena_rx_ctx) 2941173fca2SJan Medala { 2951173fca2SJan Medala uint64_t ol_flags = 0; 296fd617795SRafal Kozik uint32_t packet_type = 0; 2971173fca2SJan Medala 2981173fca2SJan Medala if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 299fd617795SRafal Kozik packet_type |= RTE_PTYPE_L4_TCP; 3001173fca2SJan Medala else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 301fd617795SRafal Kozik packet_type |= RTE_PTYPE_L4_UDP; 3021173fca2SJan Medala 3031173fca2SJan Medala if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) 304fd617795SRafal Kozik packet_type |= RTE_PTYPE_L3_IPV4; 3051173fca2SJan Medala else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) 306fd617795SRafal Kozik packet_type |= RTE_PTYPE_L3_IPV6; 3071173fca2SJan Medala 3081173fca2SJan Medala if (unlikely(ena_rx_ctx->l4_csum_err)) 3091173fca2SJan Medala ol_flags |= PKT_RX_L4_CKSUM_BAD; 3101173fca2SJan Medala if (unlikely(ena_rx_ctx->l3_csum_err)) 3111173fca2SJan Medala ol_flags |= PKT_RX_IP_CKSUM_BAD; 3121173fca2SJan Medala 3131173fca2SJan Medala mbuf->ol_flags = ol_flags; 314fd617795SRafal Kozik mbuf->packet_type = packet_type; 3151173fca2SJan Medala } 3161173fca2SJan Medala 3171173fca2SJan Medala static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 31856b8b9b7SRafal Kozik struct ena_com_tx_ctx *ena_tx_ctx, 31956b8b9b7SRafal Kozik uint64_t queue_offloads) 3201173fca2SJan Medala { 3211173fca2SJan Medala struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 3221173fca2SJan Medala 32356b8b9b7SRafal Kozik if ((mbuf->ol_flags & MBUF_OFFLOADS) && 32456b8b9b7SRafal Kozik (queue_offloads & QUEUE_OFFLOADS)) { 3251173fca2SJan Medala /* check if TSO is required */ 32656b8b9b7SRafal Kozik if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && 32756b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { 3281173fca2SJan Medala ena_tx_ctx->tso_enable = true; 3291173fca2SJan Medala 3301173fca2SJan Medala ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 3311173fca2SJan Medala } 3321173fca2SJan Medala 3331173fca2SJan Medala /* check if L3 checksum is needed */ 33456b8b9b7SRafal Kozik if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && 33556b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) 3361173fca2SJan Medala ena_tx_ctx->l3_csum_enable = true; 3371173fca2SJan Medala 3381173fca2SJan Medala if (mbuf->ol_flags & PKT_TX_IPV6) { 3391173fca2SJan Medala ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 3401173fca2SJan Medala } else { 3411173fca2SJan Medala ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 3421173fca2SJan Medala 3431173fca2SJan Medala /* set don't fragment (DF) flag */ 3441173fca2SJan Medala if (mbuf->packet_type & 3451173fca2SJan Medala (RTE_PTYPE_L4_NONFRAG 3461173fca2SJan Medala | RTE_PTYPE_INNER_L4_NONFRAG)) 3471173fca2SJan Medala ena_tx_ctx->df = true; 3481173fca2SJan Medala } 3491173fca2SJan Medala 3501173fca2SJan Medala /* check if L4 checksum is needed */ 35156b8b9b7SRafal Kozik if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) && 35256b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { 3531173fca2SJan Medala ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 3541173fca2SJan Medala ena_tx_ctx->l4_csum_enable = true; 35556b8b9b7SRafal Kozik } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) && 35656b8b9b7SRafal Kozik (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { 3571173fca2SJan Medala ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 3581173fca2SJan Medala ena_tx_ctx->l4_csum_enable = true; 35956b8b9b7SRafal Kozik } else { 3601173fca2SJan Medala ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 3611173fca2SJan Medala ena_tx_ctx->l4_csum_enable = false; 3621173fca2SJan Medala } 3631173fca2SJan Medala 3641173fca2SJan Medala ena_meta->mss = mbuf->tso_segsz; 3651173fca2SJan Medala ena_meta->l3_hdr_len = mbuf->l3_len; 3661173fca2SJan Medala ena_meta->l3_hdr_offset = mbuf->l2_len; 3671173fca2SJan Medala 3681173fca2SJan Medala ena_tx_ctx->meta_valid = true; 3691173fca2SJan Medala } else { 3701173fca2SJan Medala ena_tx_ctx->meta_valid = false; 3711173fca2SJan Medala } 3721173fca2SJan Medala } 3731173fca2SJan Medala 374c2034976SMichal Krawczyk static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) 375c2034976SMichal Krawczyk { 376c2034976SMichal Krawczyk if (likely(req_id < rx_ring->ring_size)) 377c2034976SMichal Krawczyk return 0; 378c2034976SMichal Krawczyk 379c2034976SMichal Krawczyk RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id); 380c2034976SMichal Krawczyk 381c2034976SMichal Krawczyk rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 382c2034976SMichal Krawczyk rx_ring->adapter->trigger_reset = true; 383c2034976SMichal Krawczyk 384c2034976SMichal Krawczyk return -EFAULT; 385c2034976SMichal Krawczyk } 386c2034976SMichal Krawczyk 387*f7d82d24SRafal Kozik static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 388*f7d82d24SRafal Kozik { 389*f7d82d24SRafal Kozik struct ena_tx_buffer *tx_info = NULL; 390*f7d82d24SRafal Kozik 391*f7d82d24SRafal Kozik if (likely(req_id < tx_ring->ring_size)) { 392*f7d82d24SRafal Kozik tx_info = &tx_ring->tx_buffer_info[req_id]; 393*f7d82d24SRafal Kozik if (likely(tx_info->mbuf)) 394*f7d82d24SRafal Kozik return 0; 395*f7d82d24SRafal Kozik } 396*f7d82d24SRafal Kozik 397*f7d82d24SRafal Kozik if (tx_info) 398*f7d82d24SRafal Kozik RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n"); 399*f7d82d24SRafal Kozik else 400*f7d82d24SRafal Kozik RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id); 401*f7d82d24SRafal Kozik 402*f7d82d24SRafal Kozik /* Trigger device reset */ 403*f7d82d24SRafal Kozik tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 404*f7d82d24SRafal Kozik tx_ring->adapter->trigger_reset = true; 405*f7d82d24SRafal Kozik return -EFAULT; 406*f7d82d24SRafal Kozik } 407*f7d82d24SRafal Kozik 408372c1af5SJan Medala static void ena_config_host_info(struct ena_com_dev *ena_dev) 409372c1af5SJan Medala { 410372c1af5SJan Medala struct ena_admin_host_info *host_info; 411372c1af5SJan Medala int rc; 412372c1af5SJan Medala 413372c1af5SJan Medala /* Allocate only the host info */ 414372c1af5SJan Medala rc = ena_com_allocate_host_info(ena_dev); 415372c1af5SJan Medala if (rc) { 416372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot allocate host info\n"); 417372c1af5SJan Medala return; 418372c1af5SJan Medala } 419372c1af5SJan Medala 420372c1af5SJan Medala host_info = ena_dev->host_attr.host_info; 421372c1af5SJan Medala 422372c1af5SJan Medala host_info->os_type = ENA_ADMIN_OS_DPDK; 423372c1af5SJan Medala host_info->kernel_ver = RTE_VERSION; 424103bb1ccSJohn W. Linville snprintf((char *)host_info->kernel_ver_str, 425103bb1ccSJohn W. Linville sizeof(host_info->kernel_ver_str), 426103bb1ccSJohn W. Linville "%s", rte_version()); 427372c1af5SJan Medala host_info->os_dist = RTE_VERSION; 428103bb1ccSJohn W. Linville snprintf((char *)host_info->os_dist_str, 429103bb1ccSJohn W. Linville sizeof(host_info->os_dist_str), 430103bb1ccSJohn W. Linville "%s", rte_version()); 431372c1af5SJan Medala host_info->driver_version = 432372c1af5SJan Medala (DRV_MODULE_VER_MAJOR) | 433372c1af5SJan Medala (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 434c4144557SJan Medala (DRV_MODULE_VER_SUBMINOR << 435c4144557SJan Medala ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 436372c1af5SJan Medala 437372c1af5SJan Medala rc = ena_com_set_host_attributes(ena_dev); 438372c1af5SJan Medala if (rc) { 439372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); 4403adcba9aSMichal Krawczyk if (rc != -ENA_COM_UNSUPPORTED) 441372c1af5SJan Medala goto err; 442372c1af5SJan Medala } 443372c1af5SJan Medala 444372c1af5SJan Medala return; 445372c1af5SJan Medala 446372c1af5SJan Medala err: 447372c1af5SJan Medala ena_com_delete_host_info(ena_dev); 448372c1af5SJan Medala } 449372c1af5SJan Medala 450372c1af5SJan Medala static int 451372c1af5SJan Medala ena_get_sset_count(struct rte_eth_dev *dev, int sset) 452372c1af5SJan Medala { 453372c1af5SJan Medala if (sset != ETH_SS_STATS) 454372c1af5SJan Medala return -EOPNOTSUPP; 455372c1af5SJan Medala 456372c1af5SJan Medala /* Workaround for clang: 457372c1af5SJan Medala * touch internal structures to prevent 458372c1af5SJan Medala * compiler error 459372c1af5SJan Medala */ 460372c1af5SJan Medala ENA_TOUCH(ena_stats_global_strings); 461372c1af5SJan Medala ENA_TOUCH(ena_stats_tx_strings); 462372c1af5SJan Medala ENA_TOUCH(ena_stats_rx_strings); 463372c1af5SJan Medala ENA_TOUCH(ena_stats_ena_com_strings); 464372c1af5SJan Medala 465372c1af5SJan Medala return dev->data->nb_tx_queues * 466372c1af5SJan Medala (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + 467372c1af5SJan Medala ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; 468372c1af5SJan Medala } 469372c1af5SJan Medala 470372c1af5SJan Medala static void ena_config_debug_area(struct ena_adapter *adapter) 471372c1af5SJan Medala { 472372c1af5SJan Medala u32 debug_area_size; 473372c1af5SJan Medala int rc, ss_count; 474372c1af5SJan Medala 475372c1af5SJan Medala ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS); 476372c1af5SJan Medala if (ss_count <= 0) { 477372c1af5SJan Medala RTE_LOG(ERR, PMD, "SS count is negative\n"); 478372c1af5SJan Medala return; 479372c1af5SJan Medala } 480372c1af5SJan Medala 481372c1af5SJan Medala /* allocate 32 bytes for each string and 64bit for the value */ 482372c1af5SJan Medala debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 483372c1af5SJan Medala 484372c1af5SJan Medala rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 485372c1af5SJan Medala if (rc) { 486372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot allocate debug area\n"); 487372c1af5SJan Medala return; 488372c1af5SJan Medala } 489372c1af5SJan Medala 490372c1af5SJan Medala rc = ena_com_set_host_attributes(&adapter->ena_dev); 491372c1af5SJan Medala if (rc) { 492372c1af5SJan Medala RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 4933adcba9aSMichal Krawczyk if (rc != -ENA_COM_UNSUPPORTED) 494372c1af5SJan Medala goto err; 495372c1af5SJan Medala } 496372c1af5SJan Medala 497372c1af5SJan Medala return; 498372c1af5SJan Medala err: 499372c1af5SJan Medala ena_com_delete_debug_area(&adapter->ena_dev); 500372c1af5SJan Medala } 501372c1af5SJan Medala 5021173fca2SJan Medala static void ena_close(struct rte_eth_dev *dev) 5031173fca2SJan Medala { 5041173fca2SJan Medala struct ena_adapter *adapter = 5051173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 5061173fca2SJan Medala 507eb0ef49dSMichal Krawczyk ena_stop(dev); 508eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_CLOSED; 50915773e06SMichal Krawczyk 5101173fca2SJan Medala ena_rx_queue_release_all(dev); 5111173fca2SJan Medala ena_tx_queue_release_all(dev); 5121173fca2SJan Medala } 5131173fca2SJan Medala 5142081d5e2SMichal Krawczyk static int 5152081d5e2SMichal Krawczyk ena_dev_reset(struct rte_eth_dev *dev) 5162081d5e2SMichal Krawczyk { 5172081d5e2SMichal Krawczyk struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES]; 5182081d5e2SMichal Krawczyk struct rte_eth_dev *eth_dev; 5192081d5e2SMichal Krawczyk struct rte_pci_device *pci_dev; 5202081d5e2SMichal Krawczyk struct rte_intr_handle *intr_handle; 5212081d5e2SMichal Krawczyk struct ena_com_dev *ena_dev; 5222081d5e2SMichal Krawczyk struct ena_com_dev_get_features_ctx get_feat_ctx; 5232081d5e2SMichal Krawczyk struct ena_adapter *adapter; 5242081d5e2SMichal Krawczyk int nb_queues; 5252081d5e2SMichal Krawczyk int rc, i; 526e859d2b8SRafal Kozik bool wd_state; 5272081d5e2SMichal Krawczyk 5282081d5e2SMichal Krawczyk adapter = (struct ena_adapter *)(dev->data->dev_private); 5292081d5e2SMichal Krawczyk ena_dev = &adapter->ena_dev; 5302081d5e2SMichal Krawczyk eth_dev = adapter->rte_dev; 5312081d5e2SMichal Krawczyk pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5322081d5e2SMichal Krawczyk intr_handle = &pci_dev->intr_handle; 5332081d5e2SMichal Krawczyk nb_queues = eth_dev->data->nb_rx_queues; 5342081d5e2SMichal Krawczyk 5352081d5e2SMichal Krawczyk ena_com_set_admin_running_state(ena_dev, false); 5362081d5e2SMichal Krawczyk 5372081d5e2SMichal Krawczyk ena_com_dev_reset(ena_dev, adapter->reset_reason); 5382081d5e2SMichal Krawczyk 5392081d5e2SMichal Krawczyk for (i = 0; i < nb_queues; i++) 5402081d5e2SMichal Krawczyk mb_pool_rx[i] = adapter->rx_ring[i].mb_pool; 5412081d5e2SMichal Krawczyk 5422081d5e2SMichal Krawczyk ena_rx_queue_release_all(eth_dev); 5432081d5e2SMichal Krawczyk ena_tx_queue_release_all(eth_dev); 5442081d5e2SMichal Krawczyk 5452081d5e2SMichal Krawczyk rte_intr_disable(intr_handle); 5462081d5e2SMichal Krawczyk 5472081d5e2SMichal Krawczyk ena_com_abort_admin_commands(ena_dev); 5482081d5e2SMichal Krawczyk ena_com_wait_for_abort_completion(ena_dev); 5492081d5e2SMichal Krawczyk ena_com_admin_destroy(ena_dev); 5502081d5e2SMichal Krawczyk ena_com_mmio_reg_read_request_destroy(ena_dev); 5512081d5e2SMichal Krawczyk 552e859d2b8SRafal Kozik rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 5532081d5e2SMichal Krawczyk if (rc) { 5542081d5e2SMichal Krawczyk PMD_INIT_LOG(CRIT, "Cannot initialize device\n"); 5552081d5e2SMichal Krawczyk return rc; 5562081d5e2SMichal Krawczyk } 557e859d2b8SRafal Kozik adapter->wd_state = wd_state; 5582081d5e2SMichal Krawczyk 5592081d5e2SMichal Krawczyk rte_intr_enable(intr_handle); 5602081d5e2SMichal Krawczyk ena_com_set_admin_polling_mode(ena_dev, false); 5612081d5e2SMichal Krawczyk ena_com_admin_aenq_enable(ena_dev); 5622081d5e2SMichal Krawczyk 5632081d5e2SMichal Krawczyk for (i = 0; i < nb_queues; ++i) 5642081d5e2SMichal Krawczyk ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL, 5652081d5e2SMichal Krawczyk mb_pool_rx[i]); 5662081d5e2SMichal Krawczyk 5672081d5e2SMichal Krawczyk for (i = 0; i < nb_queues; ++i) 5682081d5e2SMichal Krawczyk ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL); 5692081d5e2SMichal Krawczyk 5705efb9fc7SMichal Krawczyk adapter->trigger_reset = false; 5715efb9fc7SMichal Krawczyk 5722081d5e2SMichal Krawczyk return 0; 5732081d5e2SMichal Krawczyk } 5742081d5e2SMichal Krawczyk 5751173fca2SJan Medala static int ena_rss_reta_update(struct rte_eth_dev *dev, 5761173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 5771173fca2SJan Medala uint16_t reta_size) 5781173fca2SJan Medala { 5791173fca2SJan Medala struct ena_adapter *adapter = 5801173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 5811173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 5821173fca2SJan Medala int ret, i; 5831173fca2SJan Medala u16 entry_value; 5841173fca2SJan Medala int conf_idx; 5851173fca2SJan Medala int idx; 5861173fca2SJan Medala 5871173fca2SJan Medala if ((reta_size == 0) || (reta_conf == NULL)) 5881173fca2SJan Medala return -EINVAL; 5891173fca2SJan Medala 5901173fca2SJan Medala if (reta_size > ENA_RX_RSS_TABLE_SIZE) { 5911173fca2SJan Medala RTE_LOG(WARNING, PMD, 5921173fca2SJan Medala "indirection table %d is bigger than supported (%d)\n", 5931173fca2SJan Medala reta_size, ENA_RX_RSS_TABLE_SIZE); 5941173fca2SJan Medala ret = -EINVAL; 5951173fca2SJan Medala goto err; 5961173fca2SJan Medala } 5971173fca2SJan Medala 5981173fca2SJan Medala for (i = 0 ; i < reta_size ; i++) { 5991173fca2SJan Medala /* each reta_conf is for 64 entries. 6001173fca2SJan Medala * to support 128 we use 2 conf of 64 6011173fca2SJan Medala */ 6021173fca2SJan Medala conf_idx = i / RTE_RETA_GROUP_SIZE; 6031173fca2SJan Medala idx = i % RTE_RETA_GROUP_SIZE; 6041173fca2SJan Medala if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { 6051173fca2SJan Medala entry_value = 6061173fca2SJan Medala ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); 6071173fca2SJan Medala ret = ena_com_indirect_table_fill_entry(ena_dev, 6081173fca2SJan Medala i, 6091173fca2SJan Medala entry_value); 6103adcba9aSMichal Krawczyk if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) { 6111173fca2SJan Medala RTE_LOG(ERR, PMD, 6121173fca2SJan Medala "Cannot fill indirect table\n"); 6131173fca2SJan Medala ret = -ENOTSUP; 6141173fca2SJan Medala goto err; 6151173fca2SJan Medala } 6161173fca2SJan Medala } 6171173fca2SJan Medala } 6181173fca2SJan Medala 6191173fca2SJan Medala ret = ena_com_indirect_table_set(ena_dev); 6203adcba9aSMichal Krawczyk if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) { 6211173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 6221173fca2SJan Medala ret = -ENOTSUP; 6231173fca2SJan Medala goto err; 6241173fca2SJan Medala } 6251173fca2SJan Medala 6261173fca2SJan Medala RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n", 6271173fca2SJan Medala __func__, reta_size, adapter->rte_dev->data->port_id); 6281173fca2SJan Medala err: 6291173fca2SJan Medala return ret; 6301173fca2SJan Medala } 6311173fca2SJan Medala 6321173fca2SJan Medala /* Query redirection table. */ 6331173fca2SJan Medala static int ena_rss_reta_query(struct rte_eth_dev *dev, 6341173fca2SJan Medala struct rte_eth_rss_reta_entry64 *reta_conf, 6351173fca2SJan Medala uint16_t reta_size) 6361173fca2SJan Medala { 6371173fca2SJan Medala struct ena_adapter *adapter = 6381173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 6391173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 6401173fca2SJan Medala int ret; 6411173fca2SJan Medala int i; 6421173fca2SJan Medala u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; 6431173fca2SJan Medala int reta_conf_idx; 6441173fca2SJan Medala int reta_idx; 6451173fca2SJan Medala 6461173fca2SJan Medala if (reta_size == 0 || reta_conf == NULL || 6471173fca2SJan Medala (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) 6481173fca2SJan Medala return -EINVAL; 6491173fca2SJan Medala 6501173fca2SJan Medala ret = ena_com_indirect_table_get(ena_dev, indirect_table); 6513adcba9aSMichal Krawczyk if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) { 6521173fca2SJan Medala RTE_LOG(ERR, PMD, "cannot get indirect table\n"); 6531173fca2SJan Medala ret = -ENOTSUP; 6541173fca2SJan Medala goto err; 6551173fca2SJan Medala } 6561173fca2SJan Medala 6571173fca2SJan Medala for (i = 0 ; i < reta_size ; i++) { 6581173fca2SJan Medala reta_conf_idx = i / RTE_RETA_GROUP_SIZE; 6591173fca2SJan Medala reta_idx = i % RTE_RETA_GROUP_SIZE; 6601173fca2SJan Medala if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) 6611173fca2SJan Medala reta_conf[reta_conf_idx].reta[reta_idx] = 6621173fca2SJan Medala ENA_IO_RXQ_IDX_REV(indirect_table[i]); 6631173fca2SJan Medala } 6641173fca2SJan Medala err: 6651173fca2SJan Medala return ret; 6661173fca2SJan Medala } 6671173fca2SJan Medala 6681173fca2SJan Medala static int ena_rss_init_default(struct ena_adapter *adapter) 6691173fca2SJan Medala { 6701173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 6711173fca2SJan Medala uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; 6721173fca2SJan Medala int rc, i; 6731173fca2SJan Medala u32 val; 6741173fca2SJan Medala 6751173fca2SJan Medala rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 6761173fca2SJan Medala if (unlikely(rc)) { 6771173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot init indirect table\n"); 6781173fca2SJan Medala goto err_rss_init; 6791173fca2SJan Medala } 6801173fca2SJan Medala 6811173fca2SJan Medala for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 6821173fca2SJan Medala val = i % nb_rx_queues; 6831173fca2SJan Medala rc = ena_com_indirect_table_fill_entry(ena_dev, i, 6841173fca2SJan Medala ENA_IO_RXQ_IDX(val)); 6853adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 6861173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); 6871173fca2SJan Medala goto err_fill_indir; 6881173fca2SJan Medala } 6891173fca2SJan Medala } 6901173fca2SJan Medala 6911173fca2SJan Medala rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 6921173fca2SJan Medala ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 6933adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 6941173fca2SJan Medala RTE_LOG(INFO, PMD, "Cannot fill hash function\n"); 6951173fca2SJan Medala goto err_fill_indir; 6961173fca2SJan Medala } 6971173fca2SJan Medala 6981173fca2SJan Medala rc = ena_com_set_default_hash_ctrl(ena_dev); 6993adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 7001173fca2SJan Medala RTE_LOG(INFO, PMD, "Cannot fill hash control\n"); 7011173fca2SJan Medala goto err_fill_indir; 7021173fca2SJan Medala } 7031173fca2SJan Medala 7041173fca2SJan Medala rc = ena_com_indirect_table_set(ena_dev); 7053adcba9aSMichal Krawczyk if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 7061173fca2SJan Medala RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 7071173fca2SJan Medala goto err_fill_indir; 7081173fca2SJan Medala } 7091173fca2SJan Medala RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n", 7101173fca2SJan Medala adapter->rte_dev->data->port_id); 7111173fca2SJan Medala 7121173fca2SJan Medala return 0; 7131173fca2SJan Medala 7141173fca2SJan Medala err_fill_indir: 7151173fca2SJan Medala ena_com_rss_destroy(ena_dev); 7161173fca2SJan Medala err_rss_init: 7171173fca2SJan Medala 7181173fca2SJan Medala return rc; 7191173fca2SJan Medala } 7201173fca2SJan Medala 7211173fca2SJan Medala static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 7221173fca2SJan Medala { 7231173fca2SJan Medala struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; 7241173fca2SJan Medala int nb_queues = dev->data->nb_rx_queues; 7251173fca2SJan Medala int i; 7261173fca2SJan Medala 7271173fca2SJan Medala for (i = 0; i < nb_queues; i++) 7281173fca2SJan Medala ena_rx_queue_release(queues[i]); 7291173fca2SJan Medala } 7301173fca2SJan Medala 7311173fca2SJan Medala static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 7321173fca2SJan Medala { 7331173fca2SJan Medala struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; 7341173fca2SJan Medala int nb_queues = dev->data->nb_tx_queues; 7351173fca2SJan Medala int i; 7361173fca2SJan Medala 7371173fca2SJan Medala for (i = 0; i < nb_queues; i++) 7381173fca2SJan Medala ena_tx_queue_release(queues[i]); 7391173fca2SJan Medala } 7401173fca2SJan Medala 7411173fca2SJan Medala static void ena_rx_queue_release(void *queue) 7421173fca2SJan Medala { 7431173fca2SJan Medala struct ena_ring *ring = (struct ena_ring *)queue; 7441173fca2SJan Medala struct ena_adapter *adapter = ring->adapter; 7451173fca2SJan Medala int ena_qid; 7461173fca2SJan Medala 7471173fca2SJan Medala ena_assert_msg(ring->configured, 7481173fca2SJan Medala "API violation - releasing not configured queue"); 7491173fca2SJan Medala ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, 7501173fca2SJan Medala "API violation"); 7511173fca2SJan Medala 7521173fca2SJan Medala /* Destroy HW queue */ 7531173fca2SJan Medala ena_qid = ENA_IO_RXQ_IDX(ring->id); 7541173fca2SJan Medala ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); 7551173fca2SJan Medala 7561173fca2SJan Medala /* Free all bufs */ 7571173fca2SJan Medala ena_rx_queue_release_bufs(ring); 7581173fca2SJan Medala 7591173fca2SJan Medala /* Free ring resources */ 7601173fca2SJan Medala if (ring->rx_buffer_info) 7611173fca2SJan Medala rte_free(ring->rx_buffer_info); 7621173fca2SJan Medala ring->rx_buffer_info = NULL; 7631173fca2SJan Medala 764c2034976SMichal Krawczyk if (ring->empty_rx_reqs) 765c2034976SMichal Krawczyk rte_free(ring->empty_rx_reqs); 766c2034976SMichal Krawczyk ring->empty_rx_reqs = NULL; 767c2034976SMichal Krawczyk 7681173fca2SJan Medala ring->configured = 0; 7691173fca2SJan Medala 7701173fca2SJan Medala RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", 7711173fca2SJan Medala ring->port_id, ring->id); 7721173fca2SJan Medala } 7731173fca2SJan Medala 7741173fca2SJan Medala static void ena_tx_queue_release(void *queue) 7751173fca2SJan Medala { 7761173fca2SJan Medala struct ena_ring *ring = (struct ena_ring *)queue; 7771173fca2SJan Medala struct ena_adapter *adapter = ring->adapter; 7781173fca2SJan Medala int ena_qid; 7791173fca2SJan Medala 7801173fca2SJan Medala ena_assert_msg(ring->configured, 7811173fca2SJan Medala "API violation. Releasing not configured queue"); 7821173fca2SJan Medala ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, 7831173fca2SJan Medala "API violation"); 7841173fca2SJan Medala 7851173fca2SJan Medala /* Destroy HW queue */ 7861173fca2SJan Medala ena_qid = ENA_IO_TXQ_IDX(ring->id); 7871173fca2SJan Medala ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); 7881173fca2SJan Medala 7891173fca2SJan Medala /* Free all bufs */ 7901173fca2SJan Medala ena_tx_queue_release_bufs(ring); 7911173fca2SJan Medala 7921173fca2SJan Medala /* Free ring resources */ 7931173fca2SJan Medala if (ring->tx_buffer_info) 7941173fca2SJan Medala rte_free(ring->tx_buffer_info); 7951173fca2SJan Medala 7961173fca2SJan Medala if (ring->empty_tx_reqs) 7971173fca2SJan Medala rte_free(ring->empty_tx_reqs); 7981173fca2SJan Medala 7991173fca2SJan Medala ring->empty_tx_reqs = NULL; 8001173fca2SJan Medala ring->tx_buffer_info = NULL; 8011173fca2SJan Medala 8021173fca2SJan Medala ring->configured = 0; 8031173fca2SJan Medala 8041173fca2SJan Medala RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n", 8051173fca2SJan Medala ring->port_id, ring->id); 8061173fca2SJan Medala } 8071173fca2SJan Medala 8081173fca2SJan Medala static void ena_rx_queue_release_bufs(struct ena_ring *ring) 8091173fca2SJan Medala { 8101173fca2SJan Medala unsigned int ring_mask = ring->ring_size - 1; 8111173fca2SJan Medala 8121173fca2SJan Medala while (ring->next_to_clean != ring->next_to_use) { 8131173fca2SJan Medala struct rte_mbuf *m = 8141173fca2SJan Medala ring->rx_buffer_info[ring->next_to_clean & ring_mask]; 8151173fca2SJan Medala 8161173fca2SJan Medala if (m) 8171f88c0a2SOlivier Matz rte_mbuf_raw_free(m); 8181173fca2SJan Medala 8191daff526SJakub Palider ring->next_to_clean++; 8201173fca2SJan Medala } 8211173fca2SJan Medala } 8221173fca2SJan Medala 8231173fca2SJan Medala static void ena_tx_queue_release_bufs(struct ena_ring *ring) 8241173fca2SJan Medala { 825207a514cSMichal Krawczyk unsigned int i; 8261173fca2SJan Medala 827207a514cSMichal Krawczyk for (i = 0; i < ring->ring_size; ++i) { 828207a514cSMichal Krawczyk struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 8291173fca2SJan Medala 8301173fca2SJan Medala if (tx_buf->mbuf) 8311173fca2SJan Medala rte_pktmbuf_free(tx_buf->mbuf); 8321173fca2SJan Medala 8331daff526SJakub Palider ring->next_to_clean++; 8341173fca2SJan Medala } 8351173fca2SJan Medala } 8361173fca2SJan Medala 8371173fca2SJan Medala static int ena_link_update(struct rte_eth_dev *dev, 8381173fca2SJan Medala __rte_unused int wait_to_complete) 8391173fca2SJan Medala { 8401173fca2SJan Medala struct rte_eth_link *link = &dev->data->dev_link; 841ca148440SMichal Krawczyk struct ena_adapter *adapter; 8421173fca2SJan Medala 843ca148440SMichal Krawczyk adapter = (struct ena_adapter *)(dev->data->dev_private); 844ca148440SMichal Krawczyk 845ca148440SMichal Krawczyk link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 84639fd068aSMarc Sune link->link_speed = ETH_SPEED_NUM_10G; 8471173fca2SJan Medala link->link_duplex = ETH_LINK_FULL_DUPLEX; 8481173fca2SJan Medala 8491173fca2SJan Medala return 0; 8501173fca2SJan Medala } 8511173fca2SJan Medala 8521173fca2SJan Medala static int ena_queue_restart_all(struct rte_eth_dev *dev, 8531173fca2SJan Medala enum ena_ring_type ring_type) 8541173fca2SJan Medala { 8551173fca2SJan Medala struct ena_adapter *adapter = 8561173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 8571173fca2SJan Medala struct ena_ring *queues = NULL; 85853b61841SMichal Krawczyk int nb_queues; 8591173fca2SJan Medala int i = 0; 8601173fca2SJan Medala int rc = 0; 8611173fca2SJan Medala 86253b61841SMichal Krawczyk if (ring_type == ENA_RING_TYPE_RX) { 86353b61841SMichal Krawczyk queues = adapter->rx_ring; 86453b61841SMichal Krawczyk nb_queues = dev->data->nb_rx_queues; 86553b61841SMichal Krawczyk } else { 86653b61841SMichal Krawczyk queues = adapter->tx_ring; 86753b61841SMichal Krawczyk nb_queues = dev->data->nb_tx_queues; 86853b61841SMichal Krawczyk } 86953b61841SMichal Krawczyk for (i = 0; i < nb_queues; i++) { 8701173fca2SJan Medala if (queues[i].configured) { 8711173fca2SJan Medala if (ring_type == ENA_RING_TYPE_RX) { 8721173fca2SJan Medala ena_assert_msg( 8731173fca2SJan Medala dev->data->rx_queues[i] == &queues[i], 8741173fca2SJan Medala "Inconsistent state of rx queues\n"); 8751173fca2SJan Medala } else { 8761173fca2SJan Medala ena_assert_msg( 8771173fca2SJan Medala dev->data->tx_queues[i] == &queues[i], 8781173fca2SJan Medala "Inconsistent state of tx queues\n"); 8791173fca2SJan Medala } 8801173fca2SJan Medala 8811173fca2SJan Medala rc = ena_queue_restart(&queues[i]); 8821173fca2SJan Medala 8831173fca2SJan Medala if (rc) { 8841173fca2SJan Medala PMD_INIT_LOG(ERR, 885f2462150SFerruh Yigit "failed to restart queue %d type(%d)", 8861173fca2SJan Medala i, ring_type); 8871173fca2SJan Medala return -1; 8881173fca2SJan Medala } 8891173fca2SJan Medala } 8901173fca2SJan Medala } 8911173fca2SJan Medala 8921173fca2SJan Medala return 0; 8931173fca2SJan Medala } 8941173fca2SJan Medala 8951173fca2SJan Medala static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) 8961173fca2SJan Medala { 8971173fca2SJan Medala uint32_t max_frame_len = adapter->max_mtu; 8981173fca2SJan Medala 8997369f88fSRafal Kozik if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads & 9007369f88fSRafal Kozik DEV_RX_OFFLOAD_JUMBO_FRAME) 9011173fca2SJan Medala max_frame_len = 9021173fca2SJan Medala adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; 9031173fca2SJan Medala 9041173fca2SJan Medala return max_frame_len; 9051173fca2SJan Medala } 9061173fca2SJan Medala 9071173fca2SJan Medala static int ena_check_valid_conf(struct ena_adapter *adapter) 9081173fca2SJan Medala { 9091173fca2SJan Medala uint32_t max_frame_len = ena_get_mtu_conf(adapter); 9101173fca2SJan Medala 9111173fca2SJan Medala if (max_frame_len > adapter->max_mtu) { 912f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len); 9131173fca2SJan Medala return -1; 9141173fca2SJan Medala } 9151173fca2SJan Medala 9161173fca2SJan Medala return 0; 9171173fca2SJan Medala } 9181173fca2SJan Medala 9191173fca2SJan Medala static int 9201173fca2SJan Medala ena_calc_queue_size(struct ena_com_dev *ena_dev, 9212061fe41SRafal Kozik u16 *max_tx_sgl_size, 9221173fca2SJan Medala struct ena_com_dev_get_features_ctx *get_feat_ctx) 9231173fca2SJan Medala { 9241173fca2SJan Medala uint32_t queue_size = ENA_DEFAULT_RING_SIZE; 9251173fca2SJan Medala 9261173fca2SJan Medala queue_size = RTE_MIN(queue_size, 9271173fca2SJan Medala get_feat_ctx->max_queues.max_cq_depth); 9281173fca2SJan Medala queue_size = RTE_MIN(queue_size, 9291173fca2SJan Medala get_feat_ctx->max_queues.max_sq_depth); 9301173fca2SJan Medala 9311173fca2SJan Medala if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 9321173fca2SJan Medala queue_size = RTE_MIN(queue_size, 9331173fca2SJan Medala get_feat_ctx->max_queues.max_llq_depth); 9341173fca2SJan Medala 9351173fca2SJan Medala /* Round down to power of 2 */ 9361173fca2SJan Medala if (!rte_is_power_of_2(queue_size)) 9371173fca2SJan Medala queue_size = rte_align32pow2(queue_size >> 1); 9381173fca2SJan Medala 9391173fca2SJan Medala if (queue_size == 0) { 940f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "Invalid queue size"); 9411173fca2SJan Medala return -EFAULT; 9421173fca2SJan Medala } 9431173fca2SJan Medala 9442061fe41SRafal Kozik *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 9452061fe41SRafal Kozik get_feat_ctx->max_queues.max_packet_tx_descs); 9462061fe41SRafal Kozik 9471173fca2SJan Medala return queue_size; 9481173fca2SJan Medala } 9491173fca2SJan Medala 9501173fca2SJan Medala static void ena_stats_restart(struct rte_eth_dev *dev) 9511173fca2SJan Medala { 9521173fca2SJan Medala struct ena_adapter *adapter = 9531173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 9541173fca2SJan Medala 9551173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->ierrors); 9561173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->oerrors); 9571173fca2SJan Medala rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 9581173fca2SJan Medala } 9591173fca2SJan Medala 960d5b0924bSMatan Azrad static int ena_stats_get(struct rte_eth_dev *dev, 9611173fca2SJan Medala struct rte_eth_stats *stats) 9621173fca2SJan Medala { 9631173fca2SJan Medala struct ena_admin_basic_stats ena_stats; 9641173fca2SJan Medala struct ena_adapter *adapter = 9651173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 9661173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 9671173fca2SJan Medala int rc; 9681173fca2SJan Medala 9691173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 970d5b0924bSMatan Azrad return -ENOTSUP; 9711173fca2SJan Medala 9721173fca2SJan Medala memset(&ena_stats, 0, sizeof(ena_stats)); 9731173fca2SJan Medala rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 9741173fca2SJan Medala if (unlikely(rc)) { 9751173fca2SJan Medala RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA"); 976d5b0924bSMatan Azrad return rc; 9771173fca2SJan Medala } 9781173fca2SJan Medala 9791173fca2SJan Medala /* Set of basic statistics from ENA */ 9801173fca2SJan Medala stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 9811173fca2SJan Medala ena_stats.rx_pkts_low); 9821173fca2SJan Medala stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 9831173fca2SJan Medala ena_stats.tx_pkts_low); 9841173fca2SJan Medala stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 9851173fca2SJan Medala ena_stats.rx_bytes_low); 9861173fca2SJan Medala stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 9871173fca2SJan Medala ena_stats.tx_bytes_low); 9881173fca2SJan Medala stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high, 9891173fca2SJan Medala ena_stats.rx_drops_low); 9901173fca2SJan Medala 9911173fca2SJan Medala /* Driver related stats */ 9921173fca2SJan Medala stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 9931173fca2SJan Medala stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 9941173fca2SJan Medala stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 995d5b0924bSMatan Azrad return 0; 9961173fca2SJan Medala } 9971173fca2SJan Medala 9981173fca2SJan Medala static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 9991173fca2SJan Medala { 10001173fca2SJan Medala struct ena_adapter *adapter; 10011173fca2SJan Medala struct ena_com_dev *ena_dev; 10021173fca2SJan Medala int rc = 0; 10031173fca2SJan Medala 10041173fca2SJan Medala ena_assert_msg(dev->data != NULL, "Uninitialized device"); 10051173fca2SJan Medala ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); 10061173fca2SJan Medala adapter = (struct ena_adapter *)(dev->data->dev_private); 10071173fca2SJan Medala 10081173fca2SJan Medala ena_dev = &adapter->ena_dev; 10091173fca2SJan Medala ena_assert_msg(ena_dev != NULL, "Uninitialized device"); 10101173fca2SJan Medala 10111173fca2SJan Medala if (mtu > ena_get_mtu_conf(adapter)) { 10121173fca2SJan Medala RTE_LOG(ERR, PMD, 10131173fca2SJan Medala "Given MTU (%d) exceeds maximum MTU supported (%d)\n", 10141173fca2SJan Medala mtu, ena_get_mtu_conf(adapter)); 10151173fca2SJan Medala rc = -EINVAL; 10161173fca2SJan Medala goto err; 10171173fca2SJan Medala } 10181173fca2SJan Medala 10191173fca2SJan Medala rc = ena_com_set_dev_mtu(ena_dev, mtu); 10201173fca2SJan Medala if (rc) 10211173fca2SJan Medala RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu); 10221173fca2SJan Medala else 10231173fca2SJan Medala RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); 10241173fca2SJan Medala 10251173fca2SJan Medala err: 10261173fca2SJan Medala return rc; 10271173fca2SJan Medala } 10281173fca2SJan Medala 10291173fca2SJan Medala static int ena_start(struct rte_eth_dev *dev) 10301173fca2SJan Medala { 10311173fca2SJan Medala struct ena_adapter *adapter = 10321173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 1033d9b8b106SMichal Krawczyk uint64_t ticks; 10341173fca2SJan Medala int rc = 0; 10351173fca2SJan Medala 10361173fca2SJan Medala rc = ena_check_valid_conf(adapter); 10371173fca2SJan Medala if (rc) 10381173fca2SJan Medala return rc; 10391173fca2SJan Medala 10401173fca2SJan Medala rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX); 10411173fca2SJan Medala if (rc) 10421173fca2SJan Medala return rc; 10431173fca2SJan Medala 10441173fca2SJan Medala rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX); 10451173fca2SJan Medala if (rc) 10461173fca2SJan Medala return rc; 10471173fca2SJan Medala 10481173fca2SJan Medala if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 10491173fca2SJan Medala ETH_MQ_RX_RSS_FLAG) { 10501173fca2SJan Medala rc = ena_rss_init_default(adapter); 10511173fca2SJan Medala if (rc) 10521173fca2SJan Medala return rc; 10531173fca2SJan Medala } 10541173fca2SJan Medala 10551173fca2SJan Medala ena_stats_restart(dev); 10561173fca2SJan Medala 1057d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles(); 1058d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1059d9b8b106SMichal Krawczyk 1060d9b8b106SMichal Krawczyk ticks = rte_get_timer_hz(); 1061d9b8b106SMichal Krawczyk rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1062d9b8b106SMichal Krawczyk ena_timer_wd_callback, adapter); 1063d9b8b106SMichal Krawczyk 10641173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_RUNNING; 10651173fca2SJan Medala 10661173fca2SJan Medala return 0; 10671173fca2SJan Medala } 10681173fca2SJan Medala 1069eb0ef49dSMichal Krawczyk static void ena_stop(struct rte_eth_dev *dev) 1070eb0ef49dSMichal Krawczyk { 1071eb0ef49dSMichal Krawczyk struct ena_adapter *adapter = 1072eb0ef49dSMichal Krawczyk (struct ena_adapter *)(dev->data->dev_private); 1073eb0ef49dSMichal Krawczyk 1074d9b8b106SMichal Krawczyk rte_timer_stop_sync(&adapter->timer_wd); 1075d9b8b106SMichal Krawczyk 1076eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_STOPPED; 1077eb0ef49dSMichal Krawczyk } 1078eb0ef49dSMichal Krawczyk 10791173fca2SJan Medala static int ena_queue_restart(struct ena_ring *ring) 10801173fca2SJan Medala { 1081a467e8f3SMichal Krawczyk int rc, bufs_num; 10821173fca2SJan Medala 10831173fca2SJan Medala ena_assert_msg(ring->configured == 1, 10841173fca2SJan Medala "Trying to restart unconfigured queue\n"); 10851173fca2SJan Medala 10861173fca2SJan Medala ring->next_to_clean = 0; 10871173fca2SJan Medala ring->next_to_use = 0; 10881173fca2SJan Medala 10891173fca2SJan Medala if (ring->type == ENA_RING_TYPE_TX) 10901173fca2SJan Medala return 0; 10911173fca2SJan Medala 1092a467e8f3SMichal Krawczyk bufs_num = ring->ring_size - 1; 1093a467e8f3SMichal Krawczyk rc = ena_populate_rx_queue(ring, bufs_num); 1094a467e8f3SMichal Krawczyk if (rc != bufs_num) { 1095f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 10961173fca2SJan Medala return (-1); 10971173fca2SJan Medala } 10981173fca2SJan Medala 10991173fca2SJan Medala return 0; 11001173fca2SJan Medala } 11011173fca2SJan Medala 11021173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, 11031173fca2SJan Medala uint16_t queue_idx, 11041173fca2SJan Medala uint16_t nb_desc, 11051173fca2SJan Medala __rte_unused unsigned int socket_id, 110656b8b9b7SRafal Kozik const struct rte_eth_txconf *tx_conf) 11071173fca2SJan Medala { 11086dcee7cdSJan Medala struct ena_com_create_io_ctx ctx = 11096dcee7cdSJan Medala /* policy set to _HOST just to satisfy icc compiler */ 11106dcee7cdSJan Medala { ENA_ADMIN_PLACEMENT_POLICY_HOST, 11116dcee7cdSJan Medala ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 }; 11121173fca2SJan Medala struct ena_ring *txq = NULL; 11131173fca2SJan Medala struct ena_adapter *adapter = 11141173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 11151173fca2SJan Medala unsigned int i; 11161173fca2SJan Medala int ena_qid; 11171173fca2SJan Medala int rc; 11181173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 11191173fca2SJan Medala 11201173fca2SJan Medala txq = &adapter->tx_ring[queue_idx]; 11211173fca2SJan Medala 11221173fca2SJan Medala if (txq->configured) { 11231173fca2SJan Medala RTE_LOG(CRIT, PMD, 11241173fca2SJan Medala "API violation. Queue %d is already configured\n", 11251173fca2SJan Medala queue_idx); 11261173fca2SJan Medala return -1; 11271173fca2SJan Medala } 11281173fca2SJan Medala 11291daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 11301daff526SJakub Palider RTE_LOG(ERR, PMD, 11311daff526SJakub Palider "Unsupported size of RX queue: %d is not a power of 2.", 11321daff526SJakub Palider nb_desc); 11331daff526SJakub Palider return -EINVAL; 11341daff526SJakub Palider } 11351daff526SJakub Palider 11361173fca2SJan Medala if (nb_desc > adapter->tx_ring_size) { 11371173fca2SJan Medala RTE_LOG(ERR, PMD, 11381173fca2SJan Medala "Unsupported size of TX queue (max size: %d)\n", 11391173fca2SJan Medala adapter->tx_ring_size); 11401173fca2SJan Medala return -EINVAL; 11411173fca2SJan Medala } 11421173fca2SJan Medala 11431173fca2SJan Medala ena_qid = ENA_IO_TXQ_IDX(queue_idx); 11446dcee7cdSJan Medala 11456dcee7cdSJan Medala ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 11466dcee7cdSJan Medala ctx.qid = ena_qid; 11476dcee7cdSJan Medala ctx.msix_vector = -1; /* admin interrupts not used */ 11486dcee7cdSJan Medala ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 11496dcee7cdSJan Medala ctx.queue_size = adapter->tx_ring_size; 11503d3edc26SJan Medala ctx.numa_node = ena_cpu_to_node(queue_idx); 11516dcee7cdSJan Medala 11526dcee7cdSJan Medala rc = ena_com_create_io_queue(ena_dev, &ctx); 11531173fca2SJan Medala if (rc) { 11541173fca2SJan Medala RTE_LOG(ERR, PMD, 11551173fca2SJan Medala "failed to create io TX queue #%d (qid:%d) rc: %d\n", 11561173fca2SJan Medala queue_idx, ena_qid, rc); 11571173fca2SJan Medala } 11581173fca2SJan Medala txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; 11591173fca2SJan Medala txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; 11601173fca2SJan Medala 11616dcee7cdSJan Medala rc = ena_com_get_io_handlers(ena_dev, ena_qid, 11626dcee7cdSJan Medala &txq->ena_com_io_sq, 11636dcee7cdSJan Medala &txq->ena_com_io_cq); 11646dcee7cdSJan Medala if (rc) { 11656dcee7cdSJan Medala RTE_LOG(ERR, PMD, 11666dcee7cdSJan Medala "Failed to get TX queue handlers. TX queue num %d rc: %d\n", 11676dcee7cdSJan Medala queue_idx, rc); 11686dcee7cdSJan Medala ena_com_destroy_io_queue(ena_dev, ena_qid); 11696dcee7cdSJan Medala goto err; 11706dcee7cdSJan Medala } 11716dcee7cdSJan Medala 11721173fca2SJan Medala txq->port_id = dev->data->port_id; 11731173fca2SJan Medala txq->next_to_clean = 0; 11741173fca2SJan Medala txq->next_to_use = 0; 11751173fca2SJan Medala txq->ring_size = nb_desc; 11761173fca2SJan Medala 11771173fca2SJan Medala txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 11781173fca2SJan Medala sizeof(struct ena_tx_buffer) * 11791173fca2SJan Medala txq->ring_size, 11801173fca2SJan Medala RTE_CACHE_LINE_SIZE); 11811173fca2SJan Medala if (!txq->tx_buffer_info) { 11821173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); 11831173fca2SJan Medala return -ENOMEM; 11841173fca2SJan Medala } 11851173fca2SJan Medala 11861173fca2SJan Medala txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 11871173fca2SJan Medala sizeof(u16) * txq->ring_size, 11881173fca2SJan Medala RTE_CACHE_LINE_SIZE); 11891173fca2SJan Medala if (!txq->empty_tx_reqs) { 11901173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); 11911173fca2SJan Medala rte_free(txq->tx_buffer_info); 11921173fca2SJan Medala return -ENOMEM; 11931173fca2SJan Medala } 11941173fca2SJan Medala for (i = 0; i < txq->ring_size; i++) 11951173fca2SJan Medala txq->empty_tx_reqs[i] = i; 11961173fca2SJan Medala 11972081d5e2SMichal Krawczyk if (tx_conf != NULL) { 11982081d5e2SMichal Krawczyk txq->offloads = 11992081d5e2SMichal Krawczyk tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 12002081d5e2SMichal Krawczyk } 120156b8b9b7SRafal Kozik 12021173fca2SJan Medala /* Store pointer to this queue in upper layer */ 12031173fca2SJan Medala txq->configured = 1; 12041173fca2SJan Medala dev->data->tx_queues[queue_idx] = txq; 12056dcee7cdSJan Medala err: 12061173fca2SJan Medala return rc; 12071173fca2SJan Medala } 12081173fca2SJan Medala 12091173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, 12101173fca2SJan Medala uint16_t queue_idx, 12111173fca2SJan Medala uint16_t nb_desc, 12121173fca2SJan Medala __rte_unused unsigned int socket_id, 1213a4996bd8SWei Dai __rte_unused const struct rte_eth_rxconf *rx_conf, 12141173fca2SJan Medala struct rte_mempool *mp) 12151173fca2SJan Medala { 12166dcee7cdSJan Medala struct ena_com_create_io_ctx ctx = 12176dcee7cdSJan Medala /* policy set to _HOST just to satisfy icc compiler */ 12186dcee7cdSJan Medala { ENA_ADMIN_PLACEMENT_POLICY_HOST, 12196dcee7cdSJan Medala ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 }; 12201173fca2SJan Medala struct ena_adapter *adapter = 12211173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 12221173fca2SJan Medala struct ena_ring *rxq = NULL; 12231173fca2SJan Medala uint16_t ena_qid = 0; 1224c2034976SMichal Krawczyk int i, rc = 0; 12251173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 12261173fca2SJan Medala 12271173fca2SJan Medala rxq = &adapter->rx_ring[queue_idx]; 12281173fca2SJan Medala if (rxq->configured) { 12291173fca2SJan Medala RTE_LOG(CRIT, PMD, 12301173fca2SJan Medala "API violation. Queue %d is already configured\n", 12311173fca2SJan Medala queue_idx); 12321173fca2SJan Medala return -1; 12331173fca2SJan Medala } 12341173fca2SJan Medala 12351daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 12361daff526SJakub Palider RTE_LOG(ERR, PMD, 12371daff526SJakub Palider "Unsupported size of TX queue: %d is not a power of 2.", 12381daff526SJakub Palider nb_desc); 12391daff526SJakub Palider return -EINVAL; 12401daff526SJakub Palider } 12411daff526SJakub Palider 12421173fca2SJan Medala if (nb_desc > adapter->rx_ring_size) { 12431173fca2SJan Medala RTE_LOG(ERR, PMD, 12441173fca2SJan Medala "Unsupported size of RX queue (max size: %d)\n", 12451173fca2SJan Medala adapter->rx_ring_size); 12461173fca2SJan Medala return -EINVAL; 12471173fca2SJan Medala } 12481173fca2SJan Medala 12491173fca2SJan Medala ena_qid = ENA_IO_RXQ_IDX(queue_idx); 12506dcee7cdSJan Medala 12516dcee7cdSJan Medala ctx.qid = ena_qid; 12526dcee7cdSJan Medala ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 12536dcee7cdSJan Medala ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 12546dcee7cdSJan Medala ctx.msix_vector = -1; /* admin interrupts not used */ 12556dcee7cdSJan Medala ctx.queue_size = adapter->rx_ring_size; 12563d3edc26SJan Medala ctx.numa_node = ena_cpu_to_node(queue_idx); 12576dcee7cdSJan Medala 12586dcee7cdSJan Medala rc = ena_com_create_io_queue(ena_dev, &ctx); 12591173fca2SJan Medala if (rc) 12601173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n", 12611173fca2SJan Medala queue_idx, rc); 12621173fca2SJan Medala 12631173fca2SJan Medala rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; 12641173fca2SJan Medala rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; 12651173fca2SJan Medala 12666dcee7cdSJan Medala rc = ena_com_get_io_handlers(ena_dev, ena_qid, 12676dcee7cdSJan Medala &rxq->ena_com_io_sq, 12686dcee7cdSJan Medala &rxq->ena_com_io_cq); 12696dcee7cdSJan Medala if (rc) { 12706dcee7cdSJan Medala RTE_LOG(ERR, PMD, 12716dcee7cdSJan Medala "Failed to get RX queue handlers. RX queue num %d rc: %d\n", 12726dcee7cdSJan Medala queue_idx, rc); 12736dcee7cdSJan Medala ena_com_destroy_io_queue(ena_dev, ena_qid); 12746dcee7cdSJan Medala } 12756dcee7cdSJan Medala 12761173fca2SJan Medala rxq->port_id = dev->data->port_id; 12771173fca2SJan Medala rxq->next_to_clean = 0; 12781173fca2SJan Medala rxq->next_to_use = 0; 12791173fca2SJan Medala rxq->ring_size = nb_desc; 12801173fca2SJan Medala rxq->mb_pool = mp; 12811173fca2SJan Medala 12821173fca2SJan Medala rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", 12831173fca2SJan Medala sizeof(struct rte_mbuf *) * nb_desc, 12841173fca2SJan Medala RTE_CACHE_LINE_SIZE); 12851173fca2SJan Medala if (!rxq->rx_buffer_info) { 12861173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); 12871173fca2SJan Medala return -ENOMEM; 12881173fca2SJan Medala } 12891173fca2SJan Medala 1290c2034976SMichal Krawczyk rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", 1291c2034976SMichal Krawczyk sizeof(uint16_t) * nb_desc, 1292c2034976SMichal Krawczyk RTE_CACHE_LINE_SIZE); 1293c2034976SMichal Krawczyk if (!rxq->empty_rx_reqs) { 1294c2034976SMichal Krawczyk RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n"); 1295c2034976SMichal Krawczyk rte_free(rxq->rx_buffer_info); 1296c2034976SMichal Krawczyk rxq->rx_buffer_info = NULL; 1297c2034976SMichal Krawczyk return -ENOMEM; 1298c2034976SMichal Krawczyk } 1299c2034976SMichal Krawczyk 1300c2034976SMichal Krawczyk for (i = 0; i < nb_desc; i++) 1301c2034976SMichal Krawczyk rxq->empty_tx_reqs[i] = i; 1302c2034976SMichal Krawczyk 13031173fca2SJan Medala /* Store pointer to this queue in upper layer */ 13041173fca2SJan Medala rxq->configured = 1; 13051173fca2SJan Medala dev->data->rx_queues[queue_idx] = rxq; 13061173fca2SJan Medala 13071173fca2SJan Medala return rc; 13081173fca2SJan Medala } 13091173fca2SJan Medala 13101173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 13111173fca2SJan Medala { 13121173fca2SJan Medala unsigned int i; 13131173fca2SJan Medala int rc; 13141daff526SJakub Palider uint16_t ring_size = rxq->ring_size; 13151daff526SJakub Palider uint16_t ring_mask = ring_size - 1; 13161daff526SJakub Palider uint16_t next_to_use = rxq->next_to_use; 1317c2034976SMichal Krawczyk uint16_t in_use, req_id; 13181173fca2SJan Medala struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0]; 13191173fca2SJan Medala 13201173fca2SJan Medala if (unlikely(!count)) 13211173fca2SJan Medala return 0; 13221173fca2SJan Medala 13231daff526SJakub Palider in_use = rxq->next_to_use - rxq->next_to_clean; 1324a467e8f3SMichal Krawczyk ena_assert_msg(((in_use + count) < ring_size), "bad ring state"); 13251173fca2SJan Medala 13261daff526SJakub Palider count = RTE_MIN(count, 13271daff526SJakub Palider (uint16_t)(ring_size - (next_to_use & ring_mask))); 13281173fca2SJan Medala 13291173fca2SJan Medala /* get resources for incoming packets */ 13301173fca2SJan Medala rc = rte_mempool_get_bulk(rxq->mb_pool, 13311daff526SJakub Palider (void **)(&mbufs[next_to_use & ring_mask]), 13321daff526SJakub Palider count); 13331173fca2SJan Medala if (unlikely(rc < 0)) { 13341173fca2SJan Medala rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 13351173fca2SJan Medala PMD_RX_LOG(DEBUG, "there are no enough free buffers"); 13361173fca2SJan Medala return 0; 13371173fca2SJan Medala } 13381173fca2SJan Medala 13391173fca2SJan Medala for (i = 0; i < count; i++) { 13401daff526SJakub Palider uint16_t next_to_use_masked = next_to_use & ring_mask; 13411daff526SJakub Palider struct rte_mbuf *mbuf = mbufs[next_to_use_masked]; 13421173fca2SJan Medala struct ena_com_buf ebuf; 13431173fca2SJan Medala 13441173fca2SJan Medala rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]); 1345c2034976SMichal Krawczyk 1346c2034976SMichal Krawczyk req_id = rxq->empty_rx_reqs[next_to_use_masked]; 13471173fca2SJan Medala /* prepare physical address for DMA transaction */ 1348455da545SSantosh Shukla ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 13491173fca2SJan Medala ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 13501173fca2SJan Medala /* pass resource to device */ 13511173fca2SJan Medala rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, 1352c2034976SMichal Krawczyk &ebuf, req_id); 13531173fca2SJan Medala if (unlikely(rc)) { 13542732e07aSMichal Krawczyk rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf), 13552732e07aSMichal Krawczyk count - i); 13561173fca2SJan Medala RTE_LOG(WARNING, PMD, "failed adding rx desc\n"); 13571173fca2SJan Medala break; 13581173fca2SJan Medala } 13591daff526SJakub Palider next_to_use++; 13601173fca2SJan Medala } 13611173fca2SJan Medala 13625e02e19eSJan Medala /* When we submitted free recources to device... */ 13635e02e19eSJan Medala if (i > 0) { 13645e02e19eSJan Medala /* ...let HW know that it can fill buffers with data */ 13651173fca2SJan Medala rte_wmb(); 13661173fca2SJan Medala ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 13671173fca2SJan Medala 13685e02e19eSJan Medala rxq->next_to_use = next_to_use; 13695e02e19eSJan Medala } 13705e02e19eSJan Medala 13711173fca2SJan Medala return i; 13721173fca2SJan Medala } 13731173fca2SJan Medala 13741173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev, 1375e859d2b8SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx, 1376e859d2b8SRafal Kozik bool *wd_state) 13771173fca2SJan Medala { 1378ca148440SMichal Krawczyk uint32_t aenq_groups; 13791173fca2SJan Medala int rc; 1380c4144557SJan Medala bool readless_supported; 13811173fca2SJan Medala 13821173fca2SJan Medala /* Initialize mmio registers */ 13831173fca2SJan Medala rc = ena_com_mmio_reg_read_request_init(ena_dev); 13841173fca2SJan Medala if (rc) { 13851173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to init mmio read less\n"); 13861173fca2SJan Medala return rc; 13871173fca2SJan Medala } 13881173fca2SJan Medala 1389c4144557SJan Medala /* The PCIe configuration space revision id indicate if mmio reg 1390c4144557SJan Medala * read is disabled. 1391c4144557SJan Medala */ 1392c4144557SJan Medala readless_supported = 1393c4144557SJan Medala !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id 1394c4144557SJan Medala & ENA_MMIO_DISABLE_REG_READ); 1395c4144557SJan Medala ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1396c4144557SJan Medala 13971173fca2SJan Medala /* reset device */ 13983adcba9aSMichal Krawczyk rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 13991173fca2SJan Medala if (rc) { 14001173fca2SJan Medala RTE_LOG(ERR, PMD, "cannot reset device\n"); 14011173fca2SJan Medala goto err_mmio_read_less; 14021173fca2SJan Medala } 14031173fca2SJan Medala 14041173fca2SJan Medala /* check FW version */ 14051173fca2SJan Medala rc = ena_com_validate_version(ena_dev); 14061173fca2SJan Medala if (rc) { 14071173fca2SJan Medala RTE_LOG(ERR, PMD, "device version is too low\n"); 14081173fca2SJan Medala goto err_mmio_read_less; 14091173fca2SJan Medala } 14101173fca2SJan Medala 14111173fca2SJan Medala ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 14121173fca2SJan Medala 14131173fca2SJan Medala /* ENA device administration layer init */ 1414ca148440SMichal Krawczyk rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); 14151173fca2SJan Medala if (rc) { 14161173fca2SJan Medala RTE_LOG(ERR, PMD, 14171173fca2SJan Medala "cannot initialize ena admin queue with device\n"); 14181173fca2SJan Medala goto err_mmio_read_less; 14191173fca2SJan Medala } 14201173fca2SJan Medala 14211173fca2SJan Medala /* To enable the msix interrupts the driver needs to know the number 14221173fca2SJan Medala * of queues. So the driver uses polling mode to retrieve this 14231173fca2SJan Medala * information. 14241173fca2SJan Medala */ 14251173fca2SJan Medala ena_com_set_admin_polling_mode(ena_dev, true); 14261173fca2SJan Medala 1427201ff2e5SJakub Palider ena_config_host_info(ena_dev); 1428201ff2e5SJakub Palider 14291173fca2SJan Medala /* Get Device Attributes and features */ 14301173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 14311173fca2SJan Medala if (rc) { 14321173fca2SJan Medala RTE_LOG(ERR, PMD, 14331173fca2SJan Medala "cannot get attribute for ena device rc= %d\n", rc); 14341173fca2SJan Medala goto err_admin_init; 14351173fca2SJan Medala } 14361173fca2SJan Medala 1437f01f060cSRafal Kozik aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1438d9b8b106SMichal Krawczyk BIT(ENA_ADMIN_NOTIFICATION) | 1439983cce2dSRafal Kozik BIT(ENA_ADMIN_KEEP_ALIVE) | 1440983cce2dSRafal Kozik BIT(ENA_ADMIN_FATAL_ERROR) | 1441983cce2dSRafal Kozik BIT(ENA_ADMIN_WARNING); 1442ca148440SMichal Krawczyk 1443ca148440SMichal Krawczyk aenq_groups &= get_feat_ctx->aenq.supported_groups; 1444ca148440SMichal Krawczyk rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1445ca148440SMichal Krawczyk if (rc) { 1446ca148440SMichal Krawczyk RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc); 1447ca148440SMichal Krawczyk goto err_admin_init; 1448ca148440SMichal Krawczyk } 1449ca148440SMichal Krawczyk 1450e859d2b8SRafal Kozik *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1451e859d2b8SRafal Kozik 14521173fca2SJan Medala return 0; 14531173fca2SJan Medala 14541173fca2SJan Medala err_admin_init: 14551173fca2SJan Medala ena_com_admin_destroy(ena_dev); 14561173fca2SJan Medala 14571173fca2SJan Medala err_mmio_read_less: 14581173fca2SJan Medala ena_com_mmio_reg_read_request_destroy(ena_dev); 14591173fca2SJan Medala 14601173fca2SJan Medala return rc; 14611173fca2SJan Medala } 14621173fca2SJan Medala 1463ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg) 146415773e06SMichal Krawczyk { 146515773e06SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)cb_arg; 146615773e06SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 146715773e06SMichal Krawczyk 146815773e06SMichal Krawczyk ena_com_admin_q_comp_intr_handler(ena_dev); 1469ca148440SMichal Krawczyk if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1470ca148440SMichal Krawczyk ena_com_aenq_intr_handler(ena_dev, adapter); 147115773e06SMichal Krawczyk } 147215773e06SMichal Krawczyk 14735efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter) 14745efb9fc7SMichal Krawczyk { 1475e859d2b8SRafal Kozik if (!adapter->wd_state) 1476e859d2b8SRafal Kozik return; 1477e859d2b8SRafal Kozik 14785efb9fc7SMichal Krawczyk if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 14795efb9fc7SMichal Krawczyk return; 14805efb9fc7SMichal Krawczyk 14815efb9fc7SMichal Krawczyk if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 14825efb9fc7SMichal Krawczyk adapter->keep_alive_timeout)) { 14835efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Keep alive timeout\n"); 14845efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 14855efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 14865efb9fc7SMichal Krawczyk } 14875efb9fc7SMichal Krawczyk } 14885efb9fc7SMichal Krawczyk 14895efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */ 14905efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter) 14915efb9fc7SMichal Krawczyk { 14925efb9fc7SMichal Krawczyk if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 14935efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n"); 14945efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 14955efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 14965efb9fc7SMichal Krawczyk } 14975efb9fc7SMichal Krawczyk } 14985efb9fc7SMichal Krawczyk 1499d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1500d9b8b106SMichal Krawczyk void *arg) 1501d9b8b106SMichal Krawczyk { 1502d9b8b106SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)arg; 1503d9b8b106SMichal Krawczyk struct rte_eth_dev *dev = adapter->rte_dev; 1504d9b8b106SMichal Krawczyk 15055efb9fc7SMichal Krawczyk check_for_missing_keep_alive(adapter); 15065efb9fc7SMichal Krawczyk check_for_admin_com_state(adapter); 1507d9b8b106SMichal Krawczyk 15085efb9fc7SMichal Krawczyk if (unlikely(adapter->trigger_reset)) { 15095efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Trigger reset is on\n"); 1510d9b8b106SMichal Krawczyk _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1511d9b8b106SMichal Krawczyk NULL); 1512d9b8b106SMichal Krawczyk } 1513d9b8b106SMichal Krawczyk } 1514d9b8b106SMichal Krawczyk 151501bd6877SRafal Kozik static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev, 151601bd6877SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx) 151701bd6877SRafal Kozik { 151801bd6877SRafal Kozik int io_sq_num, io_cq_num, io_queue_num; 151901bd6877SRafal Kozik 152001bd6877SRafal Kozik io_sq_num = get_feat_ctx->max_queues.max_sq_num; 152101bd6877SRafal Kozik io_cq_num = get_feat_ctx->max_queues.max_cq_num; 152201bd6877SRafal Kozik 152301bd6877SRafal Kozik io_queue_num = RTE_MIN(io_sq_num, io_cq_num); 152401bd6877SRafal Kozik 152501bd6877SRafal Kozik if (unlikely(io_queue_num == 0)) { 152601bd6877SRafal Kozik RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n"); 152701bd6877SRafal Kozik return -EFAULT; 152801bd6877SRafal Kozik } 152901bd6877SRafal Kozik 153001bd6877SRafal Kozik return io_queue_num; 153101bd6877SRafal Kozik } 153201bd6877SRafal Kozik 15331173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 15341173fca2SJan Medala { 15351173fca2SJan Medala struct rte_pci_device *pci_dev; 1536eb0ef49dSMichal Krawczyk struct rte_intr_handle *intr_handle; 15371173fca2SJan Medala struct ena_adapter *adapter = 15381173fca2SJan Medala (struct ena_adapter *)(eth_dev->data->dev_private); 15391173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 15401173fca2SJan Medala struct ena_com_dev_get_features_ctx get_feat_ctx; 15411173fca2SJan Medala int queue_size, rc; 15422061fe41SRafal Kozik u16 tx_sgl_size = 0; 15431173fca2SJan Medala 15441173fca2SJan Medala static int adapters_found; 1545e859d2b8SRafal Kozik bool wd_state; 15461173fca2SJan Medala 15471173fca2SJan Medala memset(adapter, 0, sizeof(struct ena_adapter)); 15481173fca2SJan Medala ena_dev = &adapter->ena_dev; 15491173fca2SJan Medala 15501173fca2SJan Medala eth_dev->dev_ops = &ena_dev_ops; 15511173fca2SJan Medala eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 15521173fca2SJan Medala eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 1553b3fc5a1aSKonstantin Ananyev eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 15541173fca2SJan Medala adapter->rte_eth_dev_data = eth_dev->data; 15551173fca2SJan Medala adapter->rte_dev = eth_dev; 15561173fca2SJan Medala 15571173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 15581173fca2SJan Medala return 0; 15591173fca2SJan Medala 1560c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 15611173fca2SJan Medala adapter->pdev = pci_dev; 15621173fca2SJan Medala 1563f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 15641173fca2SJan Medala pci_dev->addr.domain, 15651173fca2SJan Medala pci_dev->addr.bus, 15661173fca2SJan Medala pci_dev->addr.devid, 15671173fca2SJan Medala pci_dev->addr.function); 15681173fca2SJan Medala 1569eb0ef49dSMichal Krawczyk intr_handle = &pci_dev->intr_handle; 1570eb0ef49dSMichal Krawczyk 15711173fca2SJan Medala adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 15721173fca2SJan Medala adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 15731173fca2SJan Medala 15741d339597SRafal Kozik if (!adapter->regs) { 1575f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 15761173fca2SJan Medala ENA_REGS_BAR); 15771d339597SRafal Kozik return -ENXIO; 15781d339597SRafal Kozik } 15791173fca2SJan Medala 15801173fca2SJan Medala ena_dev->reg_bar = adapter->regs; 15811173fca2SJan Medala ena_dev->dmadev = adapter->pdev; 15821173fca2SJan Medala 15831173fca2SJan Medala adapter->id_number = adapters_found; 15841173fca2SJan Medala 15851173fca2SJan Medala snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 15861173fca2SJan Medala adapter->id_number); 15871173fca2SJan Medala 15881173fca2SJan Medala /* device specific initialization routine */ 1589e859d2b8SRafal Kozik rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 15901173fca2SJan Medala if (rc) { 1591f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 15921173fca2SJan Medala return -1; 15931173fca2SJan Medala } 1594e859d2b8SRafal Kozik adapter->wd_state = wd_state; 15951173fca2SJan Medala 15961d339597SRafal Kozik ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 159701bd6877SRafal Kozik adapter->num_queues = ena_calc_io_queue_num(ena_dev, 159801bd6877SRafal Kozik &get_feat_ctx); 15991173fca2SJan Medala 16002061fe41SRafal Kozik queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx); 16011173fca2SJan Medala if ((queue_size <= 0) || (adapter->num_queues <= 0)) 16021173fca2SJan Medala return -EFAULT; 16031173fca2SJan Medala 16041173fca2SJan Medala adapter->tx_ring_size = queue_size; 16051173fca2SJan Medala adapter->rx_ring_size = queue_size; 16061173fca2SJan Medala 16072061fe41SRafal Kozik adapter->max_tx_sgl_size = tx_sgl_size; 16082061fe41SRafal Kozik 16091173fca2SJan Medala /* prepare ring structures */ 16101173fca2SJan Medala ena_init_rings(adapter); 16111173fca2SJan Medala 1612372c1af5SJan Medala ena_config_debug_area(adapter); 1613372c1af5SJan Medala 16141173fca2SJan Medala /* Set max MTU for this device */ 16151173fca2SJan Medala adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 16161173fca2SJan Medala 161783277a7cSJakub Palider /* set device support for TSO */ 161883277a7cSJakub Palider adapter->tso4_supported = get_feat_ctx.offload.tx & 161983277a7cSJakub Palider ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; 162083277a7cSJakub Palider 16211173fca2SJan Medala /* Copy MAC address and point DPDK to it */ 16221173fca2SJan Medala eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; 16231173fca2SJan Medala ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, 16241173fca2SJan Medala (struct ether_addr *)adapter->mac_addr); 16251173fca2SJan Medala 16261173fca2SJan Medala adapter->drv_stats = rte_zmalloc("adapter stats", 16271173fca2SJan Medala sizeof(*adapter->drv_stats), 16281173fca2SJan Medala RTE_CACHE_LINE_SIZE); 16291173fca2SJan Medala if (!adapter->drv_stats) { 16301173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); 16311173fca2SJan Medala return -ENOMEM; 16321173fca2SJan Medala } 16331173fca2SJan Medala 1634eb0ef49dSMichal Krawczyk rte_intr_callback_register(intr_handle, 1635eb0ef49dSMichal Krawczyk ena_interrupt_handler_rte, 1636eb0ef49dSMichal Krawczyk adapter); 1637eb0ef49dSMichal Krawczyk rte_intr_enable(intr_handle); 1638eb0ef49dSMichal Krawczyk ena_com_set_admin_polling_mode(ena_dev, false); 1639ca148440SMichal Krawczyk ena_com_admin_aenq_enable(ena_dev); 1640eb0ef49dSMichal Krawczyk 1641d9b8b106SMichal Krawczyk if (adapters_found == 0) 1642d9b8b106SMichal Krawczyk rte_timer_subsystem_init(); 1643d9b8b106SMichal Krawczyk rte_timer_init(&adapter->timer_wd); 1644d9b8b106SMichal Krawczyk 16451173fca2SJan Medala adapters_found++; 16461173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_INIT; 16471173fca2SJan Medala 16481173fca2SJan Medala return 0; 16491173fca2SJan Medala } 16501173fca2SJan Medala 1651eb0ef49dSMichal Krawczyk static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1652eb0ef49dSMichal Krawczyk { 1653eb0ef49dSMichal Krawczyk struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1654eb0ef49dSMichal Krawczyk struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1655eb0ef49dSMichal Krawczyk struct ena_adapter *adapter = 1656eb0ef49dSMichal Krawczyk (struct ena_adapter *)(eth_dev->data->dev_private); 1657eb0ef49dSMichal Krawczyk 1658eb0ef49dSMichal Krawczyk if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1659eb0ef49dSMichal Krawczyk return -EPERM; 1660eb0ef49dSMichal Krawczyk 1661eb0ef49dSMichal Krawczyk if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1662eb0ef49dSMichal Krawczyk ena_close(eth_dev); 1663eb0ef49dSMichal Krawczyk 1664eb0ef49dSMichal Krawczyk eth_dev->dev_ops = NULL; 1665eb0ef49dSMichal Krawczyk eth_dev->rx_pkt_burst = NULL; 1666eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_burst = NULL; 1667eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_prepare = NULL; 1668eb0ef49dSMichal Krawczyk 1669eb0ef49dSMichal Krawczyk rte_free(adapter->drv_stats); 1670eb0ef49dSMichal Krawczyk adapter->drv_stats = NULL; 1671eb0ef49dSMichal Krawczyk 1672eb0ef49dSMichal Krawczyk rte_intr_disable(intr_handle); 1673eb0ef49dSMichal Krawczyk rte_intr_callback_unregister(intr_handle, 1674eb0ef49dSMichal Krawczyk ena_interrupt_handler_rte, 1675eb0ef49dSMichal Krawczyk adapter); 1676eb0ef49dSMichal Krawczyk 1677eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_FREE; 1678eb0ef49dSMichal Krawczyk 1679eb0ef49dSMichal Krawczyk return 0; 1680eb0ef49dSMichal Krawczyk } 1681eb0ef49dSMichal Krawczyk 16821173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev) 16831173fca2SJan Medala { 16841173fca2SJan Medala struct ena_adapter *adapter = 16851173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 16867369f88fSRafal Kozik 16871173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_CONFIG; 16881173fca2SJan Medala 1689a4996bd8SWei Dai adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1690a4996bd8SWei Dai adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 16911173fca2SJan Medala return 0; 16921173fca2SJan Medala } 16931173fca2SJan Medala 16941173fca2SJan Medala static void ena_init_rings(struct ena_adapter *adapter) 16951173fca2SJan Medala { 16961173fca2SJan Medala int i; 16971173fca2SJan Medala 16981173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 16991173fca2SJan Medala struct ena_ring *ring = &adapter->tx_ring[i]; 17001173fca2SJan Medala 17011173fca2SJan Medala ring->configured = 0; 17021173fca2SJan Medala ring->type = ENA_RING_TYPE_TX; 17031173fca2SJan Medala ring->adapter = adapter; 17041173fca2SJan Medala ring->id = i; 17051173fca2SJan Medala ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 17061173fca2SJan Medala ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 17072061fe41SRafal Kozik ring->sgl_size = adapter->max_tx_sgl_size; 17081173fca2SJan Medala } 17091173fca2SJan Medala 17101173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 17111173fca2SJan Medala struct ena_ring *ring = &adapter->rx_ring[i]; 17121173fca2SJan Medala 17131173fca2SJan Medala ring->configured = 0; 17141173fca2SJan Medala ring->type = ENA_RING_TYPE_RX; 17151173fca2SJan Medala ring->adapter = adapter; 17161173fca2SJan Medala ring->id = i; 17171173fca2SJan Medala } 17181173fca2SJan Medala } 17191173fca2SJan Medala 17201173fca2SJan Medala static void ena_infos_get(struct rte_eth_dev *dev, 17211173fca2SJan Medala struct rte_eth_dev_info *dev_info) 17221173fca2SJan Medala { 17231173fca2SJan Medala struct ena_adapter *adapter; 17241173fca2SJan Medala struct ena_com_dev *ena_dev; 17251173fca2SJan Medala struct ena_com_dev_get_features_ctx feat; 172656b8b9b7SRafal Kozik uint64_t rx_feat = 0, tx_feat = 0; 17271173fca2SJan Medala int rc = 0; 17281173fca2SJan Medala 17291173fca2SJan Medala ena_assert_msg(dev->data != NULL, "Uninitialized device"); 17301173fca2SJan Medala ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); 17311173fca2SJan Medala adapter = (struct ena_adapter *)(dev->data->dev_private); 17321173fca2SJan Medala 17331173fca2SJan Medala ena_dev = &adapter->ena_dev; 17341173fca2SJan Medala ena_assert_msg(ena_dev != NULL, "Uninitialized device"); 17351173fca2SJan Medala 1736e274f573SMarc Sune dev_info->speed_capa = 1737e274f573SMarc Sune ETH_LINK_SPEED_1G | 1738e274f573SMarc Sune ETH_LINK_SPEED_2_5G | 1739e274f573SMarc Sune ETH_LINK_SPEED_5G | 1740e274f573SMarc Sune ETH_LINK_SPEED_10G | 1741e274f573SMarc Sune ETH_LINK_SPEED_25G | 1742e274f573SMarc Sune ETH_LINK_SPEED_40G | 1743b2feed01SThomas Monjalon ETH_LINK_SPEED_50G | 1744b2feed01SThomas Monjalon ETH_LINK_SPEED_100G; 1745e274f573SMarc Sune 17461173fca2SJan Medala /* Get supported features from HW */ 17471173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, &feat); 17481173fca2SJan Medala if (unlikely(rc)) { 17491173fca2SJan Medala RTE_LOG(ERR, PMD, 17501173fca2SJan Medala "Cannot get attribute for ena device rc= %d\n", rc); 17511173fca2SJan Medala return; 17521173fca2SJan Medala } 17531173fca2SJan Medala 17541173fca2SJan Medala /* Set Tx & Rx features available for device */ 17551173fca2SJan Medala if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 17561173fca2SJan Medala tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 17571173fca2SJan Medala 17581173fca2SJan Medala if (feat.offload.tx & 17591173fca2SJan Medala ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 17601173fca2SJan Medala tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 17611173fca2SJan Medala DEV_TX_OFFLOAD_UDP_CKSUM | 17621173fca2SJan Medala DEV_TX_OFFLOAD_TCP_CKSUM; 17631173fca2SJan Medala 17644eea092bSJakub Palider if (feat.offload.rx_supported & 17651173fca2SJan Medala ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 17661173fca2SJan Medala rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 17671173fca2SJan Medala DEV_RX_OFFLOAD_UDP_CKSUM | 17681173fca2SJan Medala DEV_RX_OFFLOAD_TCP_CKSUM; 17691173fca2SJan Medala 1770a0a4ff40SRafal Kozik rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 1771a0a4ff40SRafal Kozik 17721173fca2SJan Medala /* Inform framework about available features */ 17731173fca2SJan Medala dev_info->rx_offload_capa = rx_feat; 17747369f88fSRafal Kozik dev_info->rx_queue_offload_capa = rx_feat; 17751173fca2SJan Medala dev_info->tx_offload_capa = tx_feat; 177656b8b9b7SRafal Kozik dev_info->tx_queue_offload_capa = tx_feat; 17771173fca2SJan Medala 17781173fca2SJan Medala dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 17791173fca2SJan Medala dev_info->max_rx_pktlen = adapter->max_mtu; 17801173fca2SJan Medala dev_info->max_mac_addrs = 1; 17811173fca2SJan Medala 17821173fca2SJan Medala dev_info->max_rx_queues = adapter->num_queues; 17831173fca2SJan Medala dev_info->max_tx_queues = adapter->num_queues; 17841173fca2SJan Medala dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 178556b8b9b7SRafal Kozik 178656b8b9b7SRafal Kozik adapter->tx_supported_offloads = tx_feat; 17877369f88fSRafal Kozik adapter->rx_supported_offloads = rx_feat; 178892680dc2SRafal Kozik 178992680dc2SRafal Kozik dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC; 179092680dc2SRafal Kozik dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 179192680dc2SRafal Kozik 179292680dc2SRafal Kozik dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC; 179392680dc2SRafal Kozik dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 179492680dc2SRafal Kozik dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 179592680dc2SRafal Kozik feat.max_queues.max_packet_tx_descs); 179692680dc2SRafal Kozik dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 179792680dc2SRafal Kozik feat.max_queues.max_packet_tx_descs); 17981173fca2SJan Medala } 17991173fca2SJan Medala 18001173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 18011173fca2SJan Medala uint16_t nb_pkts) 18021173fca2SJan Medala { 18031173fca2SJan Medala struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 18041173fca2SJan Medala unsigned int ring_size = rx_ring->ring_size; 18051173fca2SJan Medala unsigned int ring_mask = ring_size - 1; 18061173fca2SJan Medala uint16_t next_to_clean = rx_ring->next_to_clean; 18071daff526SJakub Palider uint16_t desc_in_use = 0; 1808c2034976SMichal Krawczyk uint16_t req_id; 18091173fca2SJan Medala unsigned int recv_idx = 0; 18101173fca2SJan Medala struct rte_mbuf *mbuf = NULL; 18111173fca2SJan Medala struct rte_mbuf *mbuf_head = NULL; 18121173fca2SJan Medala struct rte_mbuf *mbuf_prev = NULL; 18131173fca2SJan Medala struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; 18141173fca2SJan Medala unsigned int completed; 18151173fca2SJan Medala 18161173fca2SJan Medala struct ena_com_rx_ctx ena_rx_ctx; 18171173fca2SJan Medala int rc = 0; 18181173fca2SJan Medala 18191173fca2SJan Medala /* Check adapter state */ 18201173fca2SJan Medala if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 18211173fca2SJan Medala RTE_LOG(ALERT, PMD, 18221173fca2SJan Medala "Trying to receive pkts while device is NOT running\n"); 18231173fca2SJan Medala return 0; 18241173fca2SJan Medala } 18251173fca2SJan Medala 18261daff526SJakub Palider desc_in_use = rx_ring->next_to_use - next_to_clean; 18271173fca2SJan Medala if (unlikely(nb_pkts > desc_in_use)) 18281173fca2SJan Medala nb_pkts = desc_in_use; 18291173fca2SJan Medala 18301173fca2SJan Medala for (completed = 0; completed < nb_pkts; completed++) { 18311173fca2SJan Medala int segments = 0; 18321173fca2SJan Medala 18331173fca2SJan Medala ena_rx_ctx.max_bufs = rx_ring->ring_size; 18341173fca2SJan Medala ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 18351173fca2SJan Medala ena_rx_ctx.descs = 0; 18361173fca2SJan Medala /* receive packet context */ 18371173fca2SJan Medala rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 18381173fca2SJan Medala rx_ring->ena_com_io_sq, 18391173fca2SJan Medala &ena_rx_ctx); 18401173fca2SJan Medala if (unlikely(rc)) { 18411173fca2SJan Medala RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); 18421173fca2SJan Medala return 0; 18431173fca2SJan Medala } 18441173fca2SJan Medala 18451173fca2SJan Medala if (unlikely(ena_rx_ctx.descs == 0)) 18461173fca2SJan Medala break; 18471173fca2SJan Medala 18481173fca2SJan Medala while (segments < ena_rx_ctx.descs) { 1849c2034976SMichal Krawczyk req_id = ena_rx_ctx.ena_bufs[segments].req_id; 1850c2034976SMichal Krawczyk rc = validate_rx_req_id(rx_ring, req_id); 1851c2034976SMichal Krawczyk if (unlikely(rc)) 1852c2034976SMichal Krawczyk break; 1853c2034976SMichal Krawczyk 1854c2034976SMichal Krawczyk mbuf = rx_buff_info[req_id]; 18551173fca2SJan Medala mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; 18561173fca2SJan Medala mbuf->data_off = RTE_PKTMBUF_HEADROOM; 18571173fca2SJan Medala mbuf->refcnt = 1; 18581173fca2SJan Medala mbuf->next = NULL; 18591173fca2SJan Medala if (segments == 0) { 18601173fca2SJan Medala mbuf->nb_segs = ena_rx_ctx.descs; 18611173fca2SJan Medala mbuf->port = rx_ring->port_id; 18621173fca2SJan Medala mbuf->pkt_len = 0; 18631173fca2SJan Medala mbuf_head = mbuf; 18641173fca2SJan Medala } else { 18651173fca2SJan Medala /* for multi-segment pkts create mbuf chain */ 18661173fca2SJan Medala mbuf_prev->next = mbuf; 18671173fca2SJan Medala } 18681173fca2SJan Medala mbuf_head->pkt_len += mbuf->data_len; 18691173fca2SJan Medala 18701173fca2SJan Medala mbuf_prev = mbuf; 1871c2034976SMichal Krawczyk rx_ring->empty_rx_reqs[next_to_clean & ring_mask] = 1872c2034976SMichal Krawczyk req_id; 18731173fca2SJan Medala segments++; 18741daff526SJakub Palider next_to_clean++; 18751173fca2SJan Medala } 18761173fca2SJan Medala 18771173fca2SJan Medala /* fill mbuf attributes if any */ 18781173fca2SJan Medala ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); 18791173fca2SJan Medala mbuf_head->hash.rss = (uint32_t)rx_ring->id; 18801173fca2SJan Medala 18811173fca2SJan Medala /* pass to DPDK application head mbuf */ 18821173fca2SJan Medala rx_pkts[recv_idx] = mbuf_head; 18831173fca2SJan Medala recv_idx++; 18841173fca2SJan Medala } 18851173fca2SJan Medala 1886ec78af6bSMichal Krawczyk rx_ring->next_to_clean = next_to_clean; 1887ec78af6bSMichal Krawczyk 1888ec78af6bSMichal Krawczyk desc_in_use = desc_in_use - completed + 1; 18891173fca2SJan Medala /* Burst refill to save doorbells, memory barriers, const interval */ 18901daff526SJakub Palider if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) 18911daff526SJakub Palider ena_populate_rx_queue(rx_ring, ring_size - desc_in_use); 18921173fca2SJan Medala 18931173fca2SJan Medala return recv_idx; 18941173fca2SJan Medala } 18951173fca2SJan Medala 1896b3fc5a1aSKonstantin Ananyev static uint16_t 189783277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 1898b3fc5a1aSKonstantin Ananyev uint16_t nb_pkts) 1899b3fc5a1aSKonstantin Ananyev { 1900b3fc5a1aSKonstantin Ananyev int32_t ret; 1901b3fc5a1aSKonstantin Ananyev uint32_t i; 1902b3fc5a1aSKonstantin Ananyev struct rte_mbuf *m; 190383277a7cSJakub Palider struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 190483277a7cSJakub Palider struct ipv4_hdr *ip_hdr; 1905b3fc5a1aSKonstantin Ananyev uint64_t ol_flags; 190683277a7cSJakub Palider uint16_t frag_field; 190783277a7cSJakub Palider 1908b3fc5a1aSKonstantin Ananyev for (i = 0; i != nb_pkts; i++) { 1909b3fc5a1aSKonstantin Ananyev m = tx_pkts[i]; 1910b3fc5a1aSKonstantin Ananyev ol_flags = m->ol_flags; 1911b3fc5a1aSKonstantin Ananyev 1912bc5ef57dSMichal Krawczyk if (!(ol_flags & PKT_TX_IPV4)) 1913bc5ef57dSMichal Krawczyk continue; 1914bc5ef57dSMichal Krawczyk 1915bc5ef57dSMichal Krawczyk /* If there was not L2 header length specified, assume it is 1916bc5ef57dSMichal Krawczyk * length of the ethernet header. 1917bc5ef57dSMichal Krawczyk */ 1918bc5ef57dSMichal Krawczyk if (unlikely(m->l2_len == 0)) 1919bc5ef57dSMichal Krawczyk m->l2_len = sizeof(struct ether_hdr); 1920bc5ef57dSMichal Krawczyk 1921bc5ef57dSMichal Krawczyk ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, 1922bc5ef57dSMichal Krawczyk m->l2_len); 1923bc5ef57dSMichal Krawczyk frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 1924bc5ef57dSMichal Krawczyk 1925bc5ef57dSMichal Krawczyk if ((frag_field & IPV4_HDR_DF_FLAG) != 0) { 1926bc5ef57dSMichal Krawczyk m->packet_type |= RTE_PTYPE_L4_NONFRAG; 1927bc5ef57dSMichal Krawczyk 1928bc5ef57dSMichal Krawczyk /* If IPv4 header has DF flag enabled and TSO support is 1929bc5ef57dSMichal Krawczyk * disabled, partial chcecksum should not be calculated. 1930bc5ef57dSMichal Krawczyk */ 1931bc5ef57dSMichal Krawczyk if (!tx_ring->adapter->tso4_supported) 1932bc5ef57dSMichal Krawczyk continue; 1933bc5ef57dSMichal Krawczyk } 1934bc5ef57dSMichal Krawczyk 1935b3fc5a1aSKonstantin Ananyev if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 || 1936b3fc5a1aSKonstantin Ananyev (ol_flags & PKT_TX_L4_MASK) == 1937b3fc5a1aSKonstantin Ananyev PKT_TX_SCTP_CKSUM) { 1938b3fc5a1aSKonstantin Ananyev rte_errno = -ENOTSUP; 1939b3fc5a1aSKonstantin Ananyev return i; 1940b3fc5a1aSKonstantin Ananyev } 1941b3fc5a1aSKonstantin Ananyev 1942b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG 1943b3fc5a1aSKonstantin Ananyev ret = rte_validate_tx_offload(m); 1944b3fc5a1aSKonstantin Ananyev if (ret != 0) { 1945b3fc5a1aSKonstantin Ananyev rte_errno = ret; 1946b3fc5a1aSKonstantin Ananyev return i; 1947b3fc5a1aSKonstantin Ananyev } 1948b3fc5a1aSKonstantin Ananyev #endif 194983277a7cSJakub Palider 195083277a7cSJakub Palider /* In case we are supposed to TSO and have DF not set (DF=0) 195183277a7cSJakub Palider * hardware must be provided with partial checksum, otherwise 195283277a7cSJakub Palider * it will take care of necessary calculations. 195383277a7cSJakub Palider */ 195483277a7cSJakub Palider 1955b3fc5a1aSKonstantin Ananyev ret = rte_net_intel_cksum_flags_prepare(m, 1956b3fc5a1aSKonstantin Ananyev ol_flags & ~PKT_TX_TCP_SEG); 1957b3fc5a1aSKonstantin Ananyev if (ret != 0) { 1958b3fc5a1aSKonstantin Ananyev rte_errno = ret; 1959b3fc5a1aSKonstantin Ananyev return i; 1960b3fc5a1aSKonstantin Ananyev } 1961b3fc5a1aSKonstantin Ananyev } 1962b3fc5a1aSKonstantin Ananyev 1963b3fc5a1aSKonstantin Ananyev return i; 1964b3fc5a1aSKonstantin Ananyev } 1965b3fc5a1aSKonstantin Ananyev 1966f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter, 1967f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints) 1968f01f060cSRafal Kozik { 1969f01f060cSRafal Kozik if (hints->admin_completion_tx_timeout) 1970f01f060cSRafal Kozik adapter->ena_dev.admin_queue.completion_timeout = 1971f01f060cSRafal Kozik hints->admin_completion_tx_timeout * 1000; 1972f01f060cSRafal Kozik 1973f01f060cSRafal Kozik if (hints->mmio_read_timeout) 1974f01f060cSRafal Kozik /* convert to usec */ 1975f01f060cSRafal Kozik adapter->ena_dev.mmio_read.reg_read_to = 1976f01f060cSRafal Kozik hints->mmio_read_timeout * 1000; 1977d9b8b106SMichal Krawczyk 1978d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout) { 1979d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1980d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 1981d9b8b106SMichal Krawczyk else 1982d9b8b106SMichal Krawczyk // Convert msecs to ticks 1983d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = 1984d9b8b106SMichal Krawczyk (hints->driver_watchdog_timeout * 1985d9b8b106SMichal Krawczyk rte_get_timer_hz()) / 1000; 1986d9b8b106SMichal Krawczyk } 1987f01f060cSRafal Kozik } 1988f01f060cSRafal Kozik 19892061fe41SRafal Kozik static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, 19902061fe41SRafal Kozik struct rte_mbuf *mbuf) 19912061fe41SRafal Kozik { 19922061fe41SRafal Kozik int num_segments, rc; 19932061fe41SRafal Kozik 19942061fe41SRafal Kozik num_segments = mbuf->nb_segs; 19952061fe41SRafal Kozik 19962061fe41SRafal Kozik if (likely(num_segments < tx_ring->sgl_size)) 19972061fe41SRafal Kozik return 0; 19982061fe41SRafal Kozik 19992061fe41SRafal Kozik rc = rte_pktmbuf_linearize(mbuf); 20002061fe41SRafal Kozik if (unlikely(rc)) 20012061fe41SRafal Kozik RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n"); 20022061fe41SRafal Kozik 20032061fe41SRafal Kozik return rc; 20042061fe41SRafal Kozik } 20052061fe41SRafal Kozik 20061173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 20071173fca2SJan Medala uint16_t nb_pkts) 20081173fca2SJan Medala { 20091173fca2SJan Medala struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 20101daff526SJakub Palider uint16_t next_to_use = tx_ring->next_to_use; 20111daff526SJakub Palider uint16_t next_to_clean = tx_ring->next_to_clean; 20121173fca2SJan Medala struct rte_mbuf *mbuf; 20131173fca2SJan Medala unsigned int ring_size = tx_ring->ring_size; 20141173fca2SJan Medala unsigned int ring_mask = ring_size - 1; 20151173fca2SJan Medala struct ena_com_tx_ctx ena_tx_ctx; 20161173fca2SJan Medala struct ena_tx_buffer *tx_info; 20171173fca2SJan Medala struct ena_com_buf *ebuf; 20181173fca2SJan Medala uint16_t rc, req_id, total_tx_descs = 0; 2019b66b6e72SJakub Palider uint16_t sent_idx = 0, empty_tx_reqs; 20201173fca2SJan Medala int nb_hw_desc; 20211173fca2SJan Medala 20221173fca2SJan Medala /* Check adapter state */ 20231173fca2SJan Medala if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 20241173fca2SJan Medala RTE_LOG(ALERT, PMD, 20251173fca2SJan Medala "Trying to xmit pkts while device is NOT running\n"); 20261173fca2SJan Medala return 0; 20271173fca2SJan Medala } 20281173fca2SJan Medala 2029b66b6e72SJakub Palider empty_tx_reqs = ring_size - (next_to_use - next_to_clean); 2030b66b6e72SJakub Palider if (nb_pkts > empty_tx_reqs) 2031b66b6e72SJakub Palider nb_pkts = empty_tx_reqs; 2032b66b6e72SJakub Palider 20331173fca2SJan Medala for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 20341173fca2SJan Medala mbuf = tx_pkts[sent_idx]; 20351173fca2SJan Medala 20362061fe41SRafal Kozik rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); 20372061fe41SRafal Kozik if (unlikely(rc)) 20382061fe41SRafal Kozik break; 20392061fe41SRafal Kozik 20401daff526SJakub Palider req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; 20411173fca2SJan Medala tx_info = &tx_ring->tx_buffer_info[req_id]; 20421173fca2SJan Medala tx_info->mbuf = mbuf; 20431173fca2SJan Medala tx_info->num_of_bufs = 0; 20441173fca2SJan Medala ebuf = tx_info->bufs; 20451173fca2SJan Medala 20461173fca2SJan Medala /* Prepare TX context */ 20471173fca2SJan Medala memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 20481173fca2SJan Medala memset(&ena_tx_ctx.ena_meta, 0x0, 20491173fca2SJan Medala sizeof(struct ena_com_tx_meta)); 20501173fca2SJan Medala ena_tx_ctx.ena_bufs = ebuf; 20511173fca2SJan Medala ena_tx_ctx.req_id = req_id; 20521173fca2SJan Medala if (tx_ring->tx_mem_queue_type == 20531173fca2SJan Medala ENA_ADMIN_PLACEMENT_POLICY_DEV) { 20541173fca2SJan Medala /* prepare the push buffer with 20551173fca2SJan Medala * virtual address of the data 20561173fca2SJan Medala */ 20571173fca2SJan Medala ena_tx_ctx.header_len = 20581173fca2SJan Medala RTE_MIN(mbuf->data_len, 20591173fca2SJan Medala tx_ring->tx_max_header_size); 20601173fca2SJan Medala ena_tx_ctx.push_header = 20611173fca2SJan Medala (void *)((char *)mbuf->buf_addr + 20621173fca2SJan Medala mbuf->data_off); 20631173fca2SJan Medala } /* there's no else as we take advantage of memset zeroing */ 20641173fca2SJan Medala 20651173fca2SJan Medala /* Set TX offloads flags, if applicable */ 206656b8b9b7SRafal Kozik ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads); 20671173fca2SJan Medala 20681173fca2SJan Medala if (unlikely(mbuf->ol_flags & 20691173fca2SJan Medala (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD))) 20701173fca2SJan Medala rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 20711173fca2SJan Medala 20721173fca2SJan Medala rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]); 20731173fca2SJan Medala 20741173fca2SJan Medala /* Process first segment taking into 20751173fca2SJan Medala * consideration pushed header 20761173fca2SJan Medala */ 20771173fca2SJan Medala if (mbuf->data_len > ena_tx_ctx.header_len) { 2078455da545SSantosh Shukla ebuf->paddr = mbuf->buf_iova + 20791173fca2SJan Medala mbuf->data_off + 20801173fca2SJan Medala ena_tx_ctx.header_len; 20811173fca2SJan Medala ebuf->len = mbuf->data_len - ena_tx_ctx.header_len; 20821173fca2SJan Medala ebuf++; 20831173fca2SJan Medala tx_info->num_of_bufs++; 20841173fca2SJan Medala } 20851173fca2SJan Medala 20861173fca2SJan Medala while ((mbuf = mbuf->next) != NULL) { 2087455da545SSantosh Shukla ebuf->paddr = mbuf->buf_iova + mbuf->data_off; 20881173fca2SJan Medala ebuf->len = mbuf->data_len; 20891173fca2SJan Medala ebuf++; 20901173fca2SJan Medala tx_info->num_of_bufs++; 20911173fca2SJan Medala } 20921173fca2SJan Medala 20931173fca2SJan Medala ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 20941173fca2SJan Medala 20951173fca2SJan Medala /* Write data to device */ 20961173fca2SJan Medala rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, 20971173fca2SJan Medala &ena_tx_ctx, &nb_hw_desc); 20981173fca2SJan Medala if (unlikely(rc)) 20991173fca2SJan Medala break; 21001173fca2SJan Medala 21011173fca2SJan Medala tx_info->tx_descs = nb_hw_desc; 21021173fca2SJan Medala 21031daff526SJakub Palider next_to_use++; 21041173fca2SJan Medala } 21051173fca2SJan Medala 21065e02e19eSJan Medala /* If there are ready packets to be xmitted... */ 21075e02e19eSJan Medala if (sent_idx > 0) { 21085e02e19eSJan Medala /* ...let HW do its best :-) */ 21091173fca2SJan Medala rte_wmb(); 21101173fca2SJan Medala ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 21111173fca2SJan Medala 21125e02e19eSJan Medala tx_ring->next_to_use = next_to_use; 21135e02e19eSJan Medala } 21145e02e19eSJan Medala 21151173fca2SJan Medala /* Clear complete packets */ 21161173fca2SJan Medala while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { 2117*f7d82d24SRafal Kozik rc = validate_tx_req_id(tx_ring, req_id); 2118*f7d82d24SRafal Kozik if (rc) 2119*f7d82d24SRafal Kozik break; 2120*f7d82d24SRafal Kozik 21211173fca2SJan Medala /* Get Tx info & store how many descs were processed */ 21221173fca2SJan Medala tx_info = &tx_ring->tx_buffer_info[req_id]; 21231173fca2SJan Medala total_tx_descs += tx_info->tx_descs; 21241173fca2SJan Medala 21251173fca2SJan Medala /* Free whole mbuf chain */ 21261173fca2SJan Medala mbuf = tx_info->mbuf; 21271173fca2SJan Medala rte_pktmbuf_free(mbuf); 2128207a514cSMichal Krawczyk tx_info->mbuf = NULL; 21291173fca2SJan Medala 21301173fca2SJan Medala /* Put back descriptor to the ring for reuse */ 21311daff526SJakub Palider tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id; 21321daff526SJakub Palider next_to_clean++; 21331173fca2SJan Medala 21341173fca2SJan Medala /* If too many descs to clean, leave it for another run */ 21351173fca2SJan Medala if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size))) 21361173fca2SJan Medala break; 21371173fca2SJan Medala } 21381173fca2SJan Medala 21395e02e19eSJan Medala if (total_tx_descs > 0) { 21401173fca2SJan Medala /* acknowledge completion of sent packets */ 21411173fca2SJan Medala ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 21421daff526SJakub Palider tx_ring->next_to_clean = next_to_clean; 21435e02e19eSJan Medala } 21445e02e19eSJan Medala 21451173fca2SJan Medala return sent_idx; 21461173fca2SJan Medala } 21471173fca2SJan Medala 2148ca148440SMichal Krawczyk /********************************************************************* 2149ca148440SMichal Krawczyk * PMD configuration 2150ca148440SMichal Krawczyk *********************************************************************/ 2151fdf91e0fSJan Blunck static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2152fdf91e0fSJan Blunck struct rte_pci_device *pci_dev) 2153fdf91e0fSJan Blunck { 2154fdf91e0fSJan Blunck return rte_eth_dev_pci_generic_probe(pci_dev, 2155fdf91e0fSJan Blunck sizeof(struct ena_adapter), eth_ena_dev_init); 2156fdf91e0fSJan Blunck } 2157fdf91e0fSJan Blunck 2158fdf91e0fSJan Blunck static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 2159fdf91e0fSJan Blunck { 2160eb0ef49dSMichal Krawczyk return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 2161fdf91e0fSJan Blunck } 2162fdf91e0fSJan Blunck 2163fdf91e0fSJan Blunck static struct rte_pci_driver rte_ena_pmd = { 21641173fca2SJan Medala .id_table = pci_id_ena_map, 2165ca148440SMichal Krawczyk .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2166fdf91e0fSJan Blunck .probe = eth_ena_pci_probe, 2167fdf91e0fSJan Blunck .remove = eth_ena_pci_remove, 21681173fca2SJan Medala }; 21691173fca2SJan Medala 2170fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 217101f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 217206e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 21738bc0acaeSStephen Hemminger 21748bc0acaeSStephen Hemminger RTE_INIT(ena_init_log); 21758bc0acaeSStephen Hemminger static void 21768bc0acaeSStephen Hemminger ena_init_log(void) 21778bc0acaeSStephen Hemminger { 21783f111952SHarry van Haaren ena_logtype_init = rte_log_register("pmd.net.ena.init"); 21798bc0acaeSStephen Hemminger if (ena_logtype_init >= 0) 21808bc0acaeSStephen Hemminger rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE); 21813f111952SHarry van Haaren ena_logtype_driver = rte_log_register("pmd.net.ena.driver"); 21828bc0acaeSStephen Hemminger if (ena_logtype_driver >= 0) 21838bc0acaeSStephen Hemminger rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE); 21848bc0acaeSStephen Hemminger } 21853adcba9aSMichal Krawczyk 21863adcba9aSMichal Krawczyk /****************************************************************************** 21873adcba9aSMichal Krawczyk ******************************** AENQ Handlers ******************************* 21883adcba9aSMichal Krawczyk *****************************************************************************/ 2189ca148440SMichal Krawczyk static void ena_update_on_link_change(void *adapter_data, 2190ca148440SMichal Krawczyk struct ena_admin_aenq_entry *aenq_e) 2191ca148440SMichal Krawczyk { 2192ca148440SMichal Krawczyk struct rte_eth_dev *eth_dev; 2193ca148440SMichal Krawczyk struct ena_adapter *adapter; 2194ca148440SMichal Krawczyk struct ena_admin_aenq_link_change_desc *aenq_link_desc; 2195ca148440SMichal Krawczyk uint32_t status; 2196ca148440SMichal Krawczyk 2197ca148440SMichal Krawczyk adapter = (struct ena_adapter *)adapter_data; 2198ca148440SMichal Krawczyk aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 2199ca148440SMichal Krawczyk eth_dev = adapter->rte_dev; 2200ca148440SMichal Krawczyk 2201ca148440SMichal Krawczyk status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 2202ca148440SMichal Krawczyk adapter->link_status = status; 2203ca148440SMichal Krawczyk 2204ca148440SMichal Krawczyk ena_link_update(eth_dev, 0); 2205ca148440SMichal Krawczyk _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 2206ca148440SMichal Krawczyk } 2207ca148440SMichal Krawczyk 2208f01f060cSRafal Kozik static void ena_notification(void *data, 2209f01f060cSRafal Kozik struct ena_admin_aenq_entry *aenq_e) 2210f01f060cSRafal Kozik { 2211f01f060cSRafal Kozik struct ena_adapter *adapter = (struct ena_adapter *)data; 2212f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints; 2213f01f060cSRafal Kozik 2214f01f060cSRafal Kozik if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 2215f01f060cSRafal Kozik RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n", 2216f01f060cSRafal Kozik aenq_e->aenq_common_desc.group, 2217f01f060cSRafal Kozik ENA_ADMIN_NOTIFICATION); 2218f01f060cSRafal Kozik 2219f01f060cSRafal Kozik switch (aenq_e->aenq_common_desc.syndrom) { 2220f01f060cSRafal Kozik case ENA_ADMIN_UPDATE_HINTS: 2221f01f060cSRafal Kozik hints = (struct ena_admin_ena_hw_hints *) 2222f01f060cSRafal Kozik (&aenq_e->inline_data_w4); 2223f01f060cSRafal Kozik ena_update_hints(adapter, hints); 2224f01f060cSRafal Kozik break; 2225f01f060cSRafal Kozik default: 2226f01f060cSRafal Kozik RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n", 2227f01f060cSRafal Kozik aenq_e->aenq_common_desc.syndrom); 2228f01f060cSRafal Kozik } 2229f01f060cSRafal Kozik } 2230f01f060cSRafal Kozik 2231d9b8b106SMichal Krawczyk static void ena_keep_alive(void *adapter_data, 2232d9b8b106SMichal Krawczyk __rte_unused struct ena_admin_aenq_entry *aenq_e) 2233d9b8b106SMichal Krawczyk { 2234d9b8b106SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; 2235d9b8b106SMichal Krawczyk 2236d9b8b106SMichal Krawczyk adapter->timestamp_wd = rte_get_timer_cycles(); 2237d9b8b106SMichal Krawczyk } 2238d9b8b106SMichal Krawczyk 22393adcba9aSMichal Krawczyk /** 22403adcba9aSMichal Krawczyk * This handler will called for unknown event group or unimplemented handlers 22413adcba9aSMichal Krawczyk **/ 22423adcba9aSMichal Krawczyk static void unimplemented_aenq_handler(__rte_unused void *data, 22433adcba9aSMichal Krawczyk __rte_unused struct ena_admin_aenq_entry *aenq_e) 22443adcba9aSMichal Krawczyk { 2245983cce2dSRafal Kozik RTE_LOG(ERR, PMD, "Unknown event was received or event with " 2246983cce2dSRafal Kozik "unimplemented handler\n"); 22473adcba9aSMichal Krawczyk } 22483adcba9aSMichal Krawczyk 2249ca148440SMichal Krawczyk static struct ena_aenq_handlers aenq_handlers = { 22503adcba9aSMichal Krawczyk .handlers = { 2251ca148440SMichal Krawczyk [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 2252f01f060cSRafal Kozik [ENA_ADMIN_NOTIFICATION] = ena_notification, 2253d9b8b106SMichal Krawczyk [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive 22543adcba9aSMichal Krawczyk }, 22553adcba9aSMichal Krawczyk .unimplemented_handler = unimplemented_aenq_handler 22563adcba9aSMichal Krawczyk }; 2257