/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
#include <rte_net.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	1
#define DRV_MODULE_VER_MINOR	1
#define DRV_MODULE_VER_SUBMINOR	1

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 * - perform batch submissions while populating the submission queue
 * - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
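 * For example, a 1024-descriptor ring gives a per-iteration batch budget
 * of 1024 / 8 = 128 descriptors.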
 */
#define ENA_RING_DESCS_RATIO(ring_size)	(ring_size / 8)

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ENA_ETH_SS_STATS	0xFF
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_MAX_RING_SIZE_RX 8192
#define ENA_MAX_RING_SIZE_TX 1024

/*
 * Each rte_memzone should have a unique name.
 * To satisfy it, count the number of allocations and add it to the name.
 */
uint32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(io_suspend),
	ENA_STAT_GLOBAL_ENTRY(io_resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(missing_tx_comp),
	ENA_STAT_TX_ENTRY(bad_req_id),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(small_copy_len_pkt),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

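/*
 * Note: MBUF_OFFLOADS is the mbuf-side counterpart of QUEUE_OFFLOADS;
 * ena_tx_mbuf_prepare() only builds offload metadata when the mbuf
 * requests an offload that the queue has actually enabled.
 */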
/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON	0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF	0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF	0xEC21

#define ENA_TX_OFFLOAD_MASK	(\
	PKT_TX_L4_MASK |	\
	PKT_TX_IPV6 |		\
	PKT_TX_IPV4 |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

int ena_logtype_init;
int ena_logtype_driver;

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_free_io_queues_all(struct ena_adapter *adapter);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
				 enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static void ena_infos_get(struct rte_eth_dev *dev,
			  struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure        = ena_dev_configure,
	.dev_infos_get        = ena_infos_get,
	.rx_queue_setup       = ena_rx_queue_setup,
	.tx_queue_setup       = ena_tx_queue_setup,
	.dev_start            = ena_start,
	.dev_stop             = ena_stop,
	.link_update          = ena_link_update,
	.stats_get            = ena_stats_get,
	.mtu_set              = ena_mtu_set,
	.rx_queue_release     = ena_rx_queue_release,
	.tx_queue_release     = ena_tx_queue_release,
	.dev_close            = ena_close,
	.dev_reset            = ena_dev_reset,
	.reta_update          = ena_rss_reta_update,
	.reta_query           = ena_rss_reta_query,
};

#define NUMA_NO_NODE	SOCKET_ID_ANY

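/*
 * Note: a heuristic rather than a real cpu-to-node lookup: the given id is
 * used to index the memzone array and that memzone's socket is returned,
 * falling back to NUMA_NO_NODE when the index is out of range.
 */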
static inline int ena_cpu_to_node(int cpu)
{
	struct rte_config *config = rte_eal_get_configuration();
	struct rte_fbarray *arr = &config->mem_config->memzones;
	const struct rte_memzone *mz;

	if (unlikely(cpu >= RTE_MAX_MEMZONE))
		return NUMA_NO_NODE;

	mz = rte_fbarray_get(arr, cpu);

	return mz->socket_id;
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
		packet_type |= RTE_PTYPE_L3_IPV4;
	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
		packet_type |= RTE_PTYPE_L3_IPV6;

	if (unlikely(ena_rx_ctx->l4_csum_err))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	if (unlikely(ena_rx_ctx->l3_csum_err))
		ol_flags |= PKT_RX_IP_CKSUM_BAD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed
		 * (PKT_TX_L4_MASK is a 2-bit field, so the flags must be
		 * compared for equality, not just tested)
		 */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
			    PKT_TX_UDP_CKSUM) &&
			   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

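/*
 * Both req_id validators below follow the same pattern: an out-of-range
 * or stale request id means the device and the driver disagree about the
 * ring state, so a device reset is requested via trigger_reset and
 * -EFAULT is returned.
 */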
static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);

	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	rx_ring->adapter->trigger_reset = true;

	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
	else
		RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}

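/*
 * Host info is best-effort: ena_config_host_info() logs failures and
 * frees the allocation, but returns void, so bringing the device up
 * proceeds without it.
 */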
Linville "%s", rte_version()); 437372c1af5SJan Medala host_info->driver_version = 438372c1af5SJan Medala (DRV_MODULE_VER_MAJOR) | 439372c1af5SJan Medala (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 440c4144557SJan Medala (DRV_MODULE_VER_SUBMINOR << 441c4144557SJan Medala ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 442b9302eb9SRafal Kozik host_info->num_cpus = rte_lcore_count(); 443372c1af5SJan Medala 444372c1af5SJan Medala rc = ena_com_set_host_attributes(ena_dev); 445372c1af5SJan Medala if (rc) { 446241da076SRafal Kozik if (rc == -ENA_COM_UNSUPPORTED) 447241da076SRafal Kozik RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 448241da076SRafal Kozik else 449372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); 450241da076SRafal Kozik 451372c1af5SJan Medala goto err; 452372c1af5SJan Medala } 453372c1af5SJan Medala 454372c1af5SJan Medala return; 455372c1af5SJan Medala 456372c1af5SJan Medala err: 457372c1af5SJan Medala ena_com_delete_host_info(ena_dev); 458372c1af5SJan Medala } 459372c1af5SJan Medala 460372c1af5SJan Medala static int 461372c1af5SJan Medala ena_get_sset_count(struct rte_eth_dev *dev, int sset) 462372c1af5SJan Medala { 463372c1af5SJan Medala if (sset != ETH_SS_STATS) 464372c1af5SJan Medala return -EOPNOTSUPP; 465372c1af5SJan Medala 466372c1af5SJan Medala /* Workaround for clang: 467372c1af5SJan Medala * touch internal structures to prevent 468372c1af5SJan Medala * compiler error 469372c1af5SJan Medala */ 470372c1af5SJan Medala ENA_TOUCH(ena_stats_global_strings); 471372c1af5SJan Medala ENA_TOUCH(ena_stats_tx_strings); 472372c1af5SJan Medala ENA_TOUCH(ena_stats_rx_strings); 473372c1af5SJan Medala ENA_TOUCH(ena_stats_ena_com_strings); 474372c1af5SJan Medala 475372c1af5SJan Medala return dev->data->nb_tx_queues * 476372c1af5SJan Medala (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + 477372c1af5SJan Medala ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; 478372c1af5SJan Medala } 479372c1af5SJan Medala 480372c1af5SJan Medala static void ena_config_debug_area(struct ena_adapter *adapter) 481372c1af5SJan Medala { 482372c1af5SJan Medala u32 debug_area_size; 483372c1af5SJan Medala int rc, ss_count; 484372c1af5SJan Medala 485372c1af5SJan Medala ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS); 486372c1af5SJan Medala if (ss_count <= 0) { 487372c1af5SJan Medala RTE_LOG(ERR, PMD, "SS count is negative\n"); 488372c1af5SJan Medala return; 489372c1af5SJan Medala } 490372c1af5SJan Medala 491372c1af5SJan Medala /* allocate 32 bytes for each string and 64bit for the value */ 492372c1af5SJan Medala debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 493372c1af5SJan Medala 494372c1af5SJan Medala rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 495372c1af5SJan Medala if (rc) { 496372c1af5SJan Medala RTE_LOG(ERR, PMD, "Cannot allocate debug area\n"); 497372c1af5SJan Medala return; 498372c1af5SJan Medala } 499372c1af5SJan Medala 500372c1af5SJan Medala rc = ena_com_set_host_attributes(&adapter->ena_dev); 501372c1af5SJan Medala if (rc) { 502241da076SRafal Kozik if (rc == -ENA_COM_UNSUPPORTED) 503372c1af5SJan Medala RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 504241da076SRafal Kozik else 505241da076SRafal Kozik RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); 506241da076SRafal Kozik 507372c1af5SJan Medala goto err; 508372c1af5SJan Medala } 509372c1af5SJan Medala 510372c1af5SJan Medala return; 511372c1af5SJan Medala err: 512372c1af5SJan Medala ena_com_delete_debug_area(&adapter->ena_dev); 513372c1af5SJan Medala } 

static void ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     adapter);

	/*
	 * MAC is not allocated dynamically. Setting NULL should prevent from
	 * release of the resource in the rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
	struct rte_eth_dev *eth_dev;
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ena_com_dev *ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_adapter *adapter;
	int nb_queues;
	int rc, i;
	bool wd_state;

	adapter = (struct ena_adapter *)(dev->data->dev_private);
	ena_dev = &adapter->ena_dev;
	eth_dev = adapter->rte_dev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	intr_handle = &pci_dev->intr_handle;
	nb_queues = eth_dev->data->nb_rx_queues;

	ena_com_set_admin_running_state(ena_dev, false);

	rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
	if (rc)
		RTE_LOG(ERR, PMD, "Device reset failed\n");

	for (i = 0; i < nb_queues; i++)
		mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;

	ena_rx_queue_release_all(eth_dev);
	ena_tx_queue_release_all(eth_dev);

	rte_intr_disable(intr_handle);

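	/*
	 * Abort and drain any outstanding admin commands before destroying
	 * the admin queue and the MMIO read request, then re-initialize the
	 * device from scratch below.
	 */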
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
	if (rc) {
		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
		return rc;
	}
	adapter->wd_state = wd_state;

	rte_intr_enable(intr_handle);
	ena_com_set_admin_polling_mode(ena_dev, false);
	ena_com_admin_aenq_enable(ena_dev);

	for (i = 0; i < nb_queues; ++i)
		ena_rx_queue_setup(eth_dev, i, adapter->rx_ring[i].ring_size, 0,
				   NULL, mb_pool_rx[i]);

	for (i = 0; i < nb_queues; ++i)
		ena_tx_queue_setup(eth_dev, i, adapter->tx_ring[i].ring_size, 0,
				   NULL);

	adapter->trigger_reset = false;

	return 0;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		RTE_LOG(WARNING, PMD,
			"indirection table %d is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* each reta_conf is for 64 entries.
		 * to support 128 we use 2 conf of 64
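		 * (e.g. table entry 70 lands in reta_conf[1], slot 70 % 64 = 6)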
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       entry_value);
			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
				RTE_LOG(ERR, PMD,
					"Cannot fill indirect table\n");
				return rc;
			}
		}
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		return rc;
	}

	RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		RTE_LOG(ERR, PMD, "cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}

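/*
 * Default RSS setup: the 128-entry indirection table is filled
 * round-robin across the configured Rx queues and the hash function is
 * set to CRC32 with an all-ones initial value; ENA_COM_UNSUPPORTED
 * replies from the device are tolerated for the fill/set steps.
 */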
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
			RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	ena_assert_msg(ring->configured,
		       "API violation - releasing not configured queue");
	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
		       "API violation");

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	ena_assert_msg(ring->configured,
		       "API violation. Releasing not configured queue");
	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
		       "API violation");

	/* Free all bufs */
	ena_tx_queue_release_bufs(ring);

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int ring_mask = ring->ring_size - 1;

	while (ring->next_to_clean != ring->next_to_use) {
		struct rte_mbuf *m =
			ring->rx_buffer_info[ring->next_to_clean & ring_mask];

		if (m)
			rte_mbuf_raw_free(m);

		ring->next_to_clean++;
	}
}

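/*
 * Unlike the Rx variant above, which only walks the populated span
 * between next_to_clean and next_to_use, the Tx buffers are swept over
 * the whole ring and any slot still holding an mbuf is freed.
 */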
static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);

		ring->next_to_clean++;
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter;

	adapter = (struct ena_adapter *)(dev->data->dev_private);

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_restart_all(struct rte_eth_dev *dev,
				 enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_restart(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "failed to restart queue %d type(%d)",
					     i, ring_type);
				return rc;
			}
		}
	}

	return 0;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
				  "max mtu: %d, min mtu: %d\n",
			     max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

static int
ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t tx_queue_size = ENA_MAX_RING_SIZE_TX;
	uint32_t rx_queue_size = ENA_MAX_RING_SIZE_RX;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		rx_queue_size = RTE_MIN(rx_queue_size,
			max_queue_ext->max_rx_cq_depth);
		rx_queue_size = RTE_MIN(rx_queue_size,
			max_queue_ext->max_rx_sq_depth);
		tx_queue_size = RTE_MIN(tx_queue_size,
			max_queue_ext->max_tx_cq_depth);

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			tx_queue_size = RTE_MIN(tx_queue_size,
				llq->max_llq_depth);
		} else {
			tx_queue_size = RTE_MIN(tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		rx_queue_size = RTE_MIN(rx_queue_size,
			max_queues->max_cq_depth);
		rx_queue_size = RTE_MIN(rx_queue_size,
			max_queues->max_sq_depth);
		tx_queue_size = RTE_MIN(tx_queue_size,
			max_queues->max_cq_depth);

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			tx_queue_size = RTE_MIN(tx_queue_size,
				llq->max_llq_depth);
		} else {
			tx_queue_size = RTE_MIN(tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

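	/*
	 * Ring sizes must be a power of two (ring indexes are wrapped by
	 * masking), so e.g. a reported depth of 1000 is trimmed to 512.
	 */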
	/* Round down to the nearest power of 2 */
	rx_queue_size = rte_align32prevpow2(rx_queue_size);
	tx_queue_size = rte_align32prevpow2(tx_queue_size);

	if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	ctx->rx_queue_size = rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;

	return 0;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
}

static int ena_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
		return rc;
	}

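	/*
	 * The device reports each 64-bit counter as a high/low pair of
	 * 32-bit words; __MERGE_64B_H_L() recombines them below.
	 */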
	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);
	stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
					 ena_stats.rx_drops_low);

	/* Driver related stats */
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
	adapter = (struct ena_adapter *)(dev->data->dev_private);

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device");

	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
		RTE_LOG(ERR, PMD,
			"Invalid MTU setting. new_mtu: %d "
			"max mtu: %d min mtu: %d\n",
			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
		return -EINVAL;
	}

	rc = ena_com_set_dev_mtu(ena_dev, mtu);
	if (rc)
		RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu);
	else
		RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);

	return rc;
}

static int ena_start(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	uint64_t ticks;
	int rc = 0;

	rc = ena_check_valid_conf(adapter);
	if (rc)
		return rc;

	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
	if (rc)
		return rc;

	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
	if (rc)
		return rc;

	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
		rc = ena_rss_init_default(adapter);
		if (rc)
			return rc;
	}

	ena_stats_restart(dev);

	adapter->timestamp_wd = rte_get_timer_cycles();
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;

	ticks = rte_get_timer_hz();
	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
			ena_timer_wd_callback, adapter);

	adapter->state = ENA_ADAPTER_STATE_RUNNING;

	return 0;
}

static void ena_stop(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

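	/* Stop the watchdog timer first so it cannot fire while the IO
	 * queues are being destroyed.
	 */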
1137eb0ef49dSMichal Krawczyk 1138d9b8b106SMichal Krawczyk rte_timer_stop_sync(&adapter->timer_wd); 1139df238f84SMichal Krawczyk ena_free_io_queues_all(adapter); 1140d9b8b106SMichal Krawczyk 1141eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_STOPPED; 1142eb0ef49dSMichal Krawczyk } 1143eb0ef49dSMichal Krawczyk 1144df238f84SMichal Krawczyk static int ena_create_io_queue(struct ena_ring *ring) 1145df238f84SMichal Krawczyk { 1146df238f84SMichal Krawczyk struct ena_adapter *adapter; 1147df238f84SMichal Krawczyk struct ena_com_dev *ena_dev; 1148df238f84SMichal Krawczyk struct ena_com_create_io_ctx ctx = 1149df238f84SMichal Krawczyk /* policy set to _HOST just to satisfy icc compiler */ 1150df238f84SMichal Krawczyk { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1151df238f84SMichal Krawczyk 0, 0, 0, 0, 0 }; 1152df238f84SMichal Krawczyk uint16_t ena_qid; 1153778677dcSRafal Kozik unsigned int i; 1154df238f84SMichal Krawczyk int rc; 1155df238f84SMichal Krawczyk 1156df238f84SMichal Krawczyk adapter = ring->adapter; 1157df238f84SMichal Krawczyk ena_dev = &adapter->ena_dev; 1158df238f84SMichal Krawczyk 1159df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) { 1160df238f84SMichal Krawczyk ena_qid = ENA_IO_TXQ_IDX(ring->id); 1161df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1162df238f84SMichal Krawczyk ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1163df238f84SMichal Krawczyk ctx.queue_size = adapter->tx_ring_size; 1164778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1165778677dcSRafal Kozik ring->empty_tx_reqs[i] = i; 1166df238f84SMichal Krawczyk } else { 1167df238f84SMichal Krawczyk ena_qid = ENA_IO_RXQ_IDX(ring->id); 1168df238f84SMichal Krawczyk ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1169df238f84SMichal Krawczyk ctx.queue_size = adapter->rx_ring_size; 1170778677dcSRafal Kozik for (i = 0; i < ring->ring_size; i++) 1171778677dcSRafal Kozik ring->empty_rx_reqs[i] = i; 1172df238f84SMichal Krawczyk } 1173df238f84SMichal Krawczyk ctx.qid = ena_qid; 1174df238f84SMichal Krawczyk ctx.msix_vector = -1; /* interrupts not used */ 1175df238f84SMichal Krawczyk ctx.numa_node = ena_cpu_to_node(ring->id); 1176df238f84SMichal Krawczyk 1177df238f84SMichal Krawczyk rc = ena_com_create_io_queue(ena_dev, &ctx); 1178df238f84SMichal Krawczyk if (rc) { 1179df238f84SMichal Krawczyk RTE_LOG(ERR, PMD, 1180df238f84SMichal Krawczyk "failed to create io queue #%d (qid:%d) rc: %d\n", 1181df238f84SMichal Krawczyk ring->id, ena_qid, rc); 1182df238f84SMichal Krawczyk return rc; 1183df238f84SMichal Krawczyk } 1184df238f84SMichal Krawczyk 1185df238f84SMichal Krawczyk rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1186df238f84SMichal Krawczyk &ring->ena_com_io_sq, 1187df238f84SMichal Krawczyk &ring->ena_com_io_cq); 1188df238f84SMichal Krawczyk if (rc) { 1189df238f84SMichal Krawczyk RTE_LOG(ERR, PMD, 1190df238f84SMichal Krawczyk "Failed to get io queue handlers. 
queue num %d rc: %d\n", 1191df238f84SMichal Krawczyk ring->id, rc); 1192df238f84SMichal Krawczyk ena_com_destroy_io_queue(ena_dev, ena_qid); 1193df238f84SMichal Krawczyk return rc; 1194df238f84SMichal Krawczyk } 1195df238f84SMichal Krawczyk 1196df238f84SMichal Krawczyk if (ring->type == ENA_RING_TYPE_TX) 1197df238f84SMichal Krawczyk ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1198df238f84SMichal Krawczyk 1199df238f84SMichal Krawczyk return 0; 1200df238f84SMichal Krawczyk } 1201df238f84SMichal Krawczyk 1202df238f84SMichal Krawczyk static void ena_free_io_queues_all(struct ena_adapter *adapter) 1203df238f84SMichal Krawczyk { 1204df238f84SMichal Krawczyk struct rte_eth_dev *eth_dev = adapter->rte_dev; 1205df238f84SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 1206df238f84SMichal Krawczyk int i; 1207df238f84SMichal Krawczyk uint16_t ena_qid; 1208df238f84SMichal Krawczyk uint16_t nb_rxq = eth_dev->data->nb_rx_queues; 1209df238f84SMichal Krawczyk uint16_t nb_txq = eth_dev->data->nb_tx_queues; 1210df238f84SMichal Krawczyk 1211df238f84SMichal Krawczyk for (i = 0; i < nb_txq; ++i) { 1212df238f84SMichal Krawczyk ena_qid = ENA_IO_TXQ_IDX(i); 1213df238f84SMichal Krawczyk ena_com_destroy_io_queue(ena_dev, ena_qid); 1214778677dcSRafal Kozik 1215778677dcSRafal Kozik ena_tx_queue_release_bufs(&adapter->tx_ring[i]); 1216df238f84SMichal Krawczyk } 1217df238f84SMichal Krawczyk 1218df238f84SMichal Krawczyk for (i = 0; i < nb_rxq; ++i) { 1219df238f84SMichal Krawczyk ena_qid = ENA_IO_RXQ_IDX(i); 1220df238f84SMichal Krawczyk ena_com_destroy_io_queue(ena_dev, ena_qid); 1221df238f84SMichal Krawczyk 1222df238f84SMichal Krawczyk ena_rx_queue_release_bufs(&adapter->rx_ring[i]); 1223df238f84SMichal Krawczyk } 1224df238f84SMichal Krawczyk } 1225df238f84SMichal Krawczyk 12261173fca2SJan Medala static int ena_queue_restart(struct ena_ring *ring) 12271173fca2SJan Medala { 1228a467e8f3SMichal Krawczyk int rc, bufs_num; 12291173fca2SJan Medala 12301173fca2SJan Medala ena_assert_msg(ring->configured == 1, 12311173fca2SJan Medala "Trying to restart unconfigured queue\n"); 12321173fca2SJan Medala 1233df238f84SMichal Krawczyk rc = ena_create_io_queue(ring); 1234df238f84SMichal Krawczyk if (rc) { 1235df238f84SMichal Krawczyk PMD_INIT_LOG(ERR, "Failed to create IO queue!\n"); 1236df238f84SMichal Krawczyk return rc; 1237df238f84SMichal Krawczyk } 1238df238f84SMichal Krawczyk 12391173fca2SJan Medala ring->next_to_clean = 0; 12401173fca2SJan Medala ring->next_to_use = 0; 12411173fca2SJan Medala 12421173fca2SJan Medala if (ring->type == ENA_RING_TYPE_TX) 12431173fca2SJan Medala return 0; 12441173fca2SJan Medala 1245a467e8f3SMichal Krawczyk bufs_num = ring->ring_size - 1; 1246a467e8f3SMichal Krawczyk rc = ena_populate_rx_queue(ring, bufs_num); 1247a467e8f3SMichal Krawczyk if (rc != bufs_num) { 1248f2462150SFerruh Yigit PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 1249241da076SRafal Kozik return ENA_COM_FAULT; 12501173fca2SJan Medala } 12511173fca2SJan Medala 12521173fca2SJan Medala return 0; 12531173fca2SJan Medala } 12541173fca2SJan Medala 12551173fca2SJan Medala static int ena_tx_queue_setup(struct rte_eth_dev *dev, 12561173fca2SJan Medala uint16_t queue_idx, 12571173fca2SJan Medala uint16_t nb_desc, 12581173fca2SJan Medala __rte_unused unsigned int socket_id, 125956b8b9b7SRafal Kozik const struct rte_eth_txconf *tx_conf) 12601173fca2SJan Medala { 12611173fca2SJan Medala struct ena_ring *txq = NULL; 12621173fca2SJan Medala struct ena_adapter *adapter = 12631173fca2SJan Medala (struct 
ena_adapter *)(dev->data->dev_private); 12641173fca2SJan Medala unsigned int i; 12651173fca2SJan Medala 12661173fca2SJan Medala txq = &adapter->tx_ring[queue_idx]; 12671173fca2SJan Medala 12681173fca2SJan Medala if (txq->configured) { 12691173fca2SJan Medala RTE_LOG(CRIT, PMD, 12701173fca2SJan Medala "API violation. Queue %d is already configured\n", 12711173fca2SJan Medala queue_idx); 1272241da076SRafal Kozik return ENA_COM_FAULT; 12731173fca2SJan Medala } 12741173fca2SJan Medala 12751daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 12761daff526SJakub Palider RTE_LOG(ERR, PMD, 1277241da076SRafal Kozik "Unsupported size of TX queue: %d is not a power of 2.", 12781daff526SJakub Palider nb_desc); 12791daff526SJakub Palider return -EINVAL; 12801daff526SJakub Palider } 12811daff526SJakub Palider 12821173fca2SJan Medala if (nb_desc > adapter->tx_ring_size) { 12831173fca2SJan Medala RTE_LOG(ERR, PMD, 12841173fca2SJan Medala "Unsupported size of TX queue (max size: %d)\n", 12851173fca2SJan Medala adapter->tx_ring_size); 12861173fca2SJan Medala return -EINVAL; 12871173fca2SJan Medala } 12881173fca2SJan Medala 1289ea93d37eSRafal Kozik if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE) 1290ea93d37eSRafal Kozik nb_desc = adapter->tx_ring_size; 1291ea93d37eSRafal Kozik 12921173fca2SJan Medala txq->port_id = dev->data->port_id; 12931173fca2SJan Medala txq->next_to_clean = 0; 12941173fca2SJan Medala txq->next_to_use = 0; 12951173fca2SJan Medala txq->ring_size = nb_desc; 12961173fca2SJan Medala 12971173fca2SJan Medala txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 12981173fca2SJan Medala sizeof(struct ena_tx_buffer) * 12991173fca2SJan Medala txq->ring_size, 13001173fca2SJan Medala RTE_CACHE_LINE_SIZE); 13011173fca2SJan Medala if (!txq->tx_buffer_info) { 13021173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); 1303df238f84SMichal Krawczyk return -ENOMEM; 13041173fca2SJan Medala } 13051173fca2SJan Medala 13061173fca2SJan Medala txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 13071173fca2SJan Medala sizeof(u16) * txq->ring_size, 13081173fca2SJan Medala RTE_CACHE_LINE_SIZE); 13091173fca2SJan Medala if (!txq->empty_tx_reqs) { 13101173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); 1311df238f84SMichal Krawczyk rte_free(txq->tx_buffer_info); 1312df238f84SMichal Krawczyk return -ENOMEM; 13131173fca2SJan Medala } 1314241da076SRafal Kozik 13152fca2a98SMichal Krawczyk txq->push_buf_intermediate_buf = 13162fca2a98SMichal Krawczyk rte_zmalloc("txq->push_buf_intermediate_buf", 13172fca2a98SMichal Krawczyk txq->tx_max_header_size, 13182fca2a98SMichal Krawczyk RTE_CACHE_LINE_SIZE); 13192fca2a98SMichal Krawczyk if (!txq->push_buf_intermediate_buf) { 13202fca2a98SMichal Krawczyk RTE_LOG(ERR, PMD, "failed to alloc push buff for LLQ\n"); 13212fca2a98SMichal Krawczyk rte_free(txq->tx_buffer_info); 13222fca2a98SMichal Krawczyk rte_free(txq->empty_tx_reqs); 13232fca2a98SMichal Krawczyk return -ENOMEM; 13242fca2a98SMichal Krawczyk } 13252fca2a98SMichal Krawczyk 13261173fca2SJan Medala for (i = 0; i < txq->ring_size; i++) 13271173fca2SJan Medala txq->empty_tx_reqs[i] = i; 13281173fca2SJan Medala 13292081d5e2SMichal Krawczyk if (tx_conf != NULL) { 13302081d5e2SMichal Krawczyk txq->offloads = 13312081d5e2SMichal Krawczyk tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 13322081d5e2SMichal Krawczyk } 133356b8b9b7SRafal Kozik 13341173fca2SJan Medala /* Store pointer to this queue in upper layer */ 13351173fca2SJan Medala txq->configured = 1; 
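/* Only the SW ring state is prepared here; the HW IO queue itself is
 * created later, when ena_queue_restart() calls ena_create_io_queue()
 * during device start. A typical application-side setup sketch (port,
 * queue and descriptor numbers are illustrative only, nb_desc must be
 * a power of 2 for this driver):
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id); // -> ena_start() -> ena_queue_restart_all()
 */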
13361173fca2SJan Medala dev->data->tx_queues[queue_idx] = txq; 1337241da076SRafal Kozik 1338241da076SRafal Kozik return 0; 13391173fca2SJan Medala } 13401173fca2SJan Medala 13411173fca2SJan Medala static int ena_rx_queue_setup(struct rte_eth_dev *dev, 13421173fca2SJan Medala uint16_t queue_idx, 13431173fca2SJan Medala uint16_t nb_desc, 13441173fca2SJan Medala __rte_unused unsigned int socket_id, 1345a4996bd8SWei Dai __rte_unused const struct rte_eth_rxconf *rx_conf, 13461173fca2SJan Medala struct rte_mempool *mp) 13471173fca2SJan Medala { 13481173fca2SJan Medala struct ena_adapter *adapter = 13491173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 13501173fca2SJan Medala struct ena_ring *rxq = NULL; 1351df238f84SMichal Krawczyk int i; 13521173fca2SJan Medala 13531173fca2SJan Medala rxq = &adapter->rx_ring[queue_idx]; 13541173fca2SJan Medala if (rxq->configured) { 13551173fca2SJan Medala RTE_LOG(CRIT, PMD, 13561173fca2SJan Medala "API violation. Queue %d is already configured\n", 13571173fca2SJan Medala queue_idx); 1358241da076SRafal Kozik return ENA_COM_FAULT; 13591173fca2SJan Medala } 13601173fca2SJan Medala 1361ea93d37eSRafal Kozik if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE) 1362ea93d37eSRafal Kozik nb_desc = adapter->rx_ring_size; 1363ea93d37eSRafal Kozik 13641daff526SJakub Palider if (!rte_is_power_of_2(nb_desc)) { 13651daff526SJakub Palider RTE_LOG(ERR, PMD, 1366241da076SRafal Kozik "Unsupported size of RX queue: %d is not a power of 2.", 13671daff526SJakub Palider nb_desc); 13681daff526SJakub Palider return -EINVAL; 13691daff526SJakub Palider } 13701daff526SJakub Palider 13711173fca2SJan Medala if (nb_desc > adapter->rx_ring_size) { 13721173fca2SJan Medala RTE_LOG(ERR, PMD, 13731173fca2SJan Medala "Unsupported size of RX queue (max size: %d)\n", 13741173fca2SJan Medala adapter->rx_ring_size); 13751173fca2SJan Medala return -EINVAL; 13761173fca2SJan Medala } 13771173fca2SJan Medala 13781173fca2SJan Medala rxq->port_id = dev->data->port_id; 13791173fca2SJan Medala rxq->next_to_clean = 0; 13801173fca2SJan Medala rxq->next_to_use = 0; 13811173fca2SJan Medala rxq->ring_size = nb_desc; 13821173fca2SJan Medala rxq->mb_pool = mp; 13831173fca2SJan Medala 13841173fca2SJan Medala rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", 13851173fca2SJan Medala sizeof(struct rte_mbuf *) * nb_desc, 13861173fca2SJan Medala RTE_CACHE_LINE_SIZE); 13871173fca2SJan Medala if (!rxq->rx_buffer_info) { 13881173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); 13891173fca2SJan Medala return -ENOMEM; 13901173fca2SJan Medala } 13911173fca2SJan Medala 139279405ee1SRafal Kozik rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer", 139379405ee1SRafal Kozik sizeof(struct rte_mbuf *) * nb_desc, 139479405ee1SRafal Kozik RTE_CACHE_LINE_SIZE); 139579405ee1SRafal Kozik 139679405ee1SRafal Kozik if (!rxq->rx_refill_buffer) { 139779405ee1SRafal Kozik RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n"); 139879405ee1SRafal Kozik rte_free(rxq->rx_buffer_info); 139979405ee1SRafal Kozik rxq->rx_buffer_info = NULL; 140079405ee1SRafal Kozik return -ENOMEM; 140179405ee1SRafal Kozik } 140279405ee1SRafal Kozik 1403c2034976SMichal Krawczyk rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", 1404c2034976SMichal Krawczyk sizeof(uint16_t) * nb_desc, 1405c2034976SMichal Krawczyk RTE_CACHE_LINE_SIZE); 1406c2034976SMichal Krawczyk if (!rxq->empty_rx_reqs) { 1407c2034976SMichal Krawczyk RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n"); 1408c2034976SMichal 
Krawczyk rte_free(rxq->rx_buffer_info); 1409c2034976SMichal Krawczyk rxq->rx_buffer_info = NULL; 141079405ee1SRafal Kozik rte_free(rxq->rx_refill_buffer); 141179405ee1SRafal Kozik rxq->rx_refill_buffer = NULL; 1412c2034976SMichal Krawczyk return -ENOMEM; 1413c2034976SMichal Krawczyk } 1414c2034976SMichal Krawczyk 1415c2034976SMichal Krawczyk for (i = 0; i < nb_desc; i++) 1416c2034976SMichal Krawczyk rxq->empty_rx_reqs[i] = i; 1417c2034976SMichal Krawczyk 14181173fca2SJan Medala /* Store pointer to this queue in upper layer */ 14191173fca2SJan Medala rxq->configured = 1; 14201173fca2SJan Medala dev->data->rx_queues[queue_idx] = rxq; 14211173fca2SJan Medala 1422df238f84SMichal Krawczyk return 0; 14231173fca2SJan Medala } 14241173fca2SJan Medala 14251173fca2SJan Medala static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 14261173fca2SJan Medala { 14271173fca2SJan Medala unsigned int i; 14281173fca2SJan Medala int rc; 14291daff526SJakub Palider uint16_t ring_size = rxq->ring_size; 14301daff526SJakub Palider uint16_t ring_mask = ring_size - 1; 14311daff526SJakub Palider uint16_t next_to_use = rxq->next_to_use; 1432c2034976SMichal Krawczyk uint16_t in_use, req_id; 143379405ee1SRafal Kozik struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 14341173fca2SJan Medala 14351173fca2SJan Medala if (unlikely(!count)) 14361173fca2SJan Medala return 0; 14371173fca2SJan Medala 14381daff526SJakub Palider in_use = rxq->next_to_use - rxq->next_to_clean; 1439a467e8f3SMichal Krawczyk ena_assert_msg(((in_use + count) < ring_size), "bad ring state"); 14401173fca2SJan Medala 14411173fca2SJan Medala /* get resources for incoming packets */ 144279405ee1SRafal Kozik rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count); 14431173fca2SJan Medala if (unlikely(rc < 0)) { 14441173fca2SJan Medala rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 14451173fca2SJan Medala PMD_RX_LOG(DEBUG, "there are not enough free buffers"); 14461173fca2SJan Medala return 0; 14471173fca2SJan Medala } 14481173fca2SJan Medala 14491173fca2SJan Medala for (i = 0; i < count; i++) { 14501daff526SJakub Palider uint16_t next_to_use_masked = next_to_use & ring_mask; 145179405ee1SRafal Kozik struct rte_mbuf *mbuf = mbufs[i]; 14521173fca2SJan Medala struct ena_com_buf ebuf; 14531173fca2SJan Medala 145479405ee1SRafal Kozik if (likely((i + 4) < count)) 145579405ee1SRafal Kozik rte_prefetch0(mbufs[i + 4]); 1456c2034976SMichal Krawczyk 1457c2034976SMichal Krawczyk req_id = rxq->empty_rx_reqs[next_to_use_masked]; 1458241da076SRafal Kozik rc = validate_rx_req_id(rxq, req_id); 1459241da076SRafal Kozik if (unlikely(rc < 0)) 1460241da076SRafal Kozik break; 146179405ee1SRafal Kozik rxq->rx_buffer_info[req_id] = mbuf; 1462241da076SRafal Kozik 14631173fca2SJan Medala /* prepare physical address for DMA transaction */ 1464455da545SSantosh Shukla ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 14651173fca2SJan Medala ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 14661173fca2SJan Medala /* pass resource to device */ 14671173fca2SJan Medala rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, 1468c2034976SMichal Krawczyk &ebuf, req_id); 14691173fca2SJan Medala if (unlikely(rc)) { 14701173fca2SJan Medala RTE_LOG(WARNING, PMD, "failed adding rx desc\n"); 147179405ee1SRafal Kozik rxq->rx_buffer_info[req_id] = NULL; 14721173fca2SJan Medala break; 14731173fca2SJan Medala } 14741daff526SJakub Palider next_to_use++; 14751173fca2SJan Medala } 14761173fca2SJan Medala 147779405ee1SRafal Kozik if (unlikely(i < count)) { 1478241da076SRafal
Kozik RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d " 1479241da076SRafal Kozik "buffers (from %d)\n", rxq->id, i, count); 148079405ee1SRafal Kozik rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]), 148179405ee1SRafal Kozik count - i); 148279405ee1SRafal Kozik } 1483241da076SRafal Kozik 14845e02e19eSJan Medala /* When we submitted free resources to device... */ 14853d19e1abSRafal Kozik if (likely(i > 0)) { 1486241da076SRafal Kozik /* ...let HW know that it can fill buffers with data 1487241da076SRafal Kozik * 1488241da076SRafal Kozik * Add memory barrier to make sure the descriptors were written before 1489241da076SRafal Kozik * issuing a doorbell 1490241da076SRafal Kozik */ 14911173fca2SJan Medala rte_wmb(); 14921173fca2SJan Medala ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 14931173fca2SJan Medala 14945e02e19eSJan Medala rxq->next_to_use = next_to_use; 14955e02e19eSJan Medala } 14965e02e19eSJan Medala 14971173fca2SJan Medala return i; 14981173fca2SJan Medala } 14991173fca2SJan Medala 15001173fca2SJan Medala static int ena_device_init(struct ena_com_dev *ena_dev, 1501e859d2b8SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx, 1502e859d2b8SRafal Kozik bool *wd_state) 15031173fca2SJan Medala { 1504ca148440SMichal Krawczyk uint32_t aenq_groups; 15051173fca2SJan Medala int rc; 1506c4144557SJan Medala bool readless_supported; 15071173fca2SJan Medala 15081173fca2SJan Medala /* Initialize mmio registers */ 15091173fca2SJan Medala rc = ena_com_mmio_reg_read_request_init(ena_dev); 15101173fca2SJan Medala if (rc) { 15111173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to init mmio read less\n"); 15121173fca2SJan Medala return rc; 15131173fca2SJan Medala } 15141173fca2SJan Medala 1515c4144557SJan Medala /* The PCIe configuration space revision id indicates if mmio reg 1516c4144557SJan Medala * read is disabled. 1517c4144557SJan Medala */ 1518c4144557SJan Medala readless_supported = 1519c4144557SJan Medala !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id 1520c4144557SJan Medala & ENA_MMIO_DISABLE_REG_READ); 1521c4144557SJan Medala ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1522c4144557SJan Medala 15231173fca2SJan Medala /* reset device */ 15243adcba9aSMichal Krawczyk rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 15251173fca2SJan Medala if (rc) { 15261173fca2SJan Medala RTE_LOG(ERR, PMD, "cannot reset device\n"); 15271173fca2SJan Medala goto err_mmio_read_less; 15281173fca2SJan Medala } 15291173fca2SJan Medala 15301173fca2SJan Medala /* check FW version */ 15311173fca2SJan Medala rc = ena_com_validate_version(ena_dev); 15321173fca2SJan Medala if (rc) { 15331173fca2SJan Medala RTE_LOG(ERR, PMD, "device version is too low\n"); 15341173fca2SJan Medala goto err_mmio_read_less; 15351173fca2SJan Medala } 15361173fca2SJan Medala 15371173fca2SJan Medala ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 15381173fca2SJan Medala 15391173fca2SJan Medala /* ENA device administration layer init */ 1540b68309beSRafal Kozik rc = ena_com_admin_init(ena_dev, &aenq_handlers); 15411173fca2SJan Medala if (rc) { 15421173fca2SJan Medala RTE_LOG(ERR, PMD, 15431173fca2SJan Medala "cannot initialize ena admin queue with device\n"); 15441173fca2SJan Medala goto err_mmio_read_less; 15451173fca2SJan Medala } 15461173fca2SJan Medala 15471173fca2SJan Medala /* To enable the msix interrupts the driver needs to know the number 15481173fca2SJan Medala * of queues. So the driver uses polling mode to retrieve this 15491173fca2SJan Medala * information.
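 * (Polling mode is switched off again later, in eth_ena_dev_init(),
 * once the interrupt handler is registered and admin AENQ interrupts
 * are enabled via ena_com_admin_aenq_enable().)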
15501173fca2SJan Medala */ 15511173fca2SJan Medala ena_com_set_admin_polling_mode(ena_dev, true); 15521173fca2SJan Medala 1553201ff2e5SJakub Palider ena_config_host_info(ena_dev); 1554201ff2e5SJakub Palider 15551173fca2SJan Medala /* Get Device Attributes and features */ 15561173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 15571173fca2SJan Medala if (rc) { 15581173fca2SJan Medala RTE_LOG(ERR, PMD, 15591173fca2SJan Medala "cannot get attribute for ena device rc= %d\n", rc); 15601173fca2SJan Medala goto err_admin_init; 15611173fca2SJan Medala } 15621173fca2SJan Medala 1563f01f060cSRafal Kozik aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1564d9b8b106SMichal Krawczyk BIT(ENA_ADMIN_NOTIFICATION) | 1565983cce2dSRafal Kozik BIT(ENA_ADMIN_KEEP_ALIVE) | 1566983cce2dSRafal Kozik BIT(ENA_ADMIN_FATAL_ERROR) | 1567983cce2dSRafal Kozik BIT(ENA_ADMIN_WARNING); 1568ca148440SMichal Krawczyk 1569ca148440SMichal Krawczyk aenq_groups &= get_feat_ctx->aenq.supported_groups; 1570ca148440SMichal Krawczyk rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1571ca148440SMichal Krawczyk if (rc) { 1572ca148440SMichal Krawczyk RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc); 1573ca148440SMichal Krawczyk goto err_admin_init; 1574ca148440SMichal Krawczyk } 1575ca148440SMichal Krawczyk 1576e859d2b8SRafal Kozik *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1577e859d2b8SRafal Kozik 15781173fca2SJan Medala return 0; 15791173fca2SJan Medala 15801173fca2SJan Medala err_admin_init: 15811173fca2SJan Medala ena_com_admin_destroy(ena_dev); 15821173fca2SJan Medala 15831173fca2SJan Medala err_mmio_read_less: 15841173fca2SJan Medala ena_com_mmio_reg_read_request_destroy(ena_dev); 15851173fca2SJan Medala 15861173fca2SJan Medala return rc; 15871173fca2SJan Medala } 15881173fca2SJan Medala 1589ca148440SMichal Krawczyk static void ena_interrupt_handler_rte(void *cb_arg) 159015773e06SMichal Krawczyk { 159115773e06SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)cb_arg; 159215773e06SMichal Krawczyk struct ena_com_dev *ena_dev = &adapter->ena_dev; 159315773e06SMichal Krawczyk 159415773e06SMichal Krawczyk ena_com_admin_q_comp_intr_handler(ena_dev); 15953d19e1abSRafal Kozik if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1596ca148440SMichal Krawczyk ena_com_aenq_intr_handler(ena_dev, adapter); 159715773e06SMichal Krawczyk } 159815773e06SMichal Krawczyk 15995efb9fc7SMichal Krawczyk static void check_for_missing_keep_alive(struct ena_adapter *adapter) 16005efb9fc7SMichal Krawczyk { 1601e859d2b8SRafal Kozik if (!adapter->wd_state) 1602e859d2b8SRafal Kozik return; 1603e859d2b8SRafal Kozik 16045efb9fc7SMichal Krawczyk if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 16055efb9fc7SMichal Krawczyk return; 16065efb9fc7SMichal Krawczyk 16075efb9fc7SMichal Krawczyk if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 16085efb9fc7SMichal Krawczyk adapter->keep_alive_timeout)) { 16095efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Keep alive timeout\n"); 16105efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 16115efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 16125efb9fc7SMichal Krawczyk } 16135efb9fc7SMichal Krawczyk } 16145efb9fc7SMichal Krawczyk 16155efb9fc7SMichal Krawczyk /* Check if admin queue is enabled */ 16165efb9fc7SMichal Krawczyk static void check_for_admin_com_state(struct ena_adapter *adapter) 16175efb9fc7SMichal Krawczyk { 16185efb9fc7SMichal Krawczyk if 
(unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 16195efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n"); 16205efb9fc7SMichal Krawczyk adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 16215efb9fc7SMichal Krawczyk adapter->trigger_reset = true; 16225efb9fc7SMichal Krawczyk } 16235efb9fc7SMichal Krawczyk } 16245efb9fc7SMichal Krawczyk 1625d9b8b106SMichal Krawczyk static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1626d9b8b106SMichal Krawczyk void *arg) 1627d9b8b106SMichal Krawczyk { 1628d9b8b106SMichal Krawczyk struct ena_adapter *adapter = (struct ena_adapter *)arg; 1629d9b8b106SMichal Krawczyk struct rte_eth_dev *dev = adapter->rte_dev; 1630d9b8b106SMichal Krawczyk 16315efb9fc7SMichal Krawczyk check_for_missing_keep_alive(adapter); 16325efb9fc7SMichal Krawczyk check_for_admin_com_state(adapter); 1633d9b8b106SMichal Krawczyk 16345efb9fc7SMichal Krawczyk if (unlikely(adapter->trigger_reset)) { 16355efb9fc7SMichal Krawczyk RTE_LOG(ERR, PMD, "Trigger reset is on\n"); 1636d9b8b106SMichal Krawczyk _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1637d9b8b106SMichal Krawczyk NULL); 1638d9b8b106SMichal Krawczyk } 1639d9b8b106SMichal Krawczyk } 1640d9b8b106SMichal Krawczyk 16412fca2a98SMichal Krawczyk static inline void 16422fca2a98SMichal Krawczyk set_default_llq_configurations(struct ena_llq_configurations *llq_config) 16432fca2a98SMichal Krawczyk { 16442fca2a98SMichal Krawczyk llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 16452fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 16462fca2a98SMichal Krawczyk llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 16472fca2a98SMichal Krawczyk llq_config->llq_num_decs_before_header = 16482fca2a98SMichal Krawczyk ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 16492fca2a98SMichal Krawczyk llq_config->llq_ring_entry_size_value = 128; 16502fca2a98SMichal Krawczyk } 16512fca2a98SMichal Krawczyk 16522fca2a98SMichal Krawczyk static int 16532fca2a98SMichal Krawczyk ena_set_queues_placement_policy(struct ena_adapter *adapter, 16542fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev, 16552fca2a98SMichal Krawczyk struct ena_admin_feature_llq_desc *llq, 16562fca2a98SMichal Krawczyk struct ena_llq_configurations *llq_default_configurations) 16572fca2a98SMichal Krawczyk { 16582fca2a98SMichal Krawczyk int rc; 16592fca2a98SMichal Krawczyk u32 llq_feature_mask; 16602fca2a98SMichal Krawczyk 16612fca2a98SMichal Krawczyk llq_feature_mask = 1 << ENA_ADMIN_LLQ; 16622fca2a98SMichal Krawczyk if (!(ena_dev->supported_features & llq_feature_mask)) { 16632fca2a98SMichal Krawczyk RTE_LOG(INFO, PMD, 16642fca2a98SMichal Krawczyk "LLQ is not supported. Fallback to host mode policy.\n"); 16652fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16662fca2a98SMichal Krawczyk return 0; 16672fca2a98SMichal Krawczyk } 16682fca2a98SMichal Krawczyk 16692fca2a98SMichal Krawczyk rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 16702fca2a98SMichal Krawczyk if (unlikely(rc)) { 16712fca2a98SMichal Krawczyk PMD_INIT_LOG(WARNING, "Failed to config dev mode. 
" 16722fca2a98SMichal Krawczyk "Fallback to host mode policy.\n"); 16732fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16742fca2a98SMichal Krawczyk return 0; 16752fca2a98SMichal Krawczyk } 16762fca2a98SMichal Krawczyk 16772fca2a98SMichal Krawczyk /* Nothing to config, exit */ 16782fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 16792fca2a98SMichal Krawczyk return 0; 16802fca2a98SMichal Krawczyk 16812fca2a98SMichal Krawczyk if (!adapter->dev_mem_base) { 16822fca2a98SMichal Krawczyk RTE_LOG(ERR, PMD, "Unable to access LLQ bar resource. " 16832fca2a98SMichal Krawczyk "Fallback to host mode policy.\n"); 16842fca2a98SMichal Krawczyk ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 16852fca2a98SMichal Krawczyk return 0; 16862fca2a98SMichal Krawczyk } 16872fca2a98SMichal Krawczyk 16882fca2a98SMichal Krawczyk ena_dev->mem_bar = adapter->dev_mem_base; 16892fca2a98SMichal Krawczyk 16902fca2a98SMichal Krawczyk return 0; 16912fca2a98SMichal Krawczyk } 16922fca2a98SMichal Krawczyk 1693ea93d37eSRafal Kozik static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev, 169401bd6877SRafal Kozik struct ena_com_dev_get_features_ctx *get_feat_ctx) 169501bd6877SRafal Kozik { 16962fca2a98SMichal Krawczyk uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; 169701bd6877SRafal Kozik 1698ea93d37eSRafal Kozik /* Regular queues capabilities */ 1699ea93d37eSRafal Kozik if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1700ea93d37eSRafal Kozik struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1701ea93d37eSRafal Kozik &get_feat_ctx->max_queue_ext.max_queue_ext; 17022fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 17032fca2a98SMichal Krawczyk max_queue_ext->max_rx_cq_num); 17042fca2a98SMichal Krawczyk io_tx_sq_num = max_queue_ext->max_tx_sq_num; 17052fca2a98SMichal Krawczyk io_tx_cq_num = max_queue_ext->max_tx_cq_num; 1706ea93d37eSRafal Kozik } else { 1707ea93d37eSRafal Kozik struct ena_admin_queue_feature_desc *max_queues = 1708ea93d37eSRafal Kozik &get_feat_ctx->max_queues; 17092fca2a98SMichal Krawczyk io_tx_sq_num = max_queues->max_sq_num; 17102fca2a98SMichal Krawczyk io_tx_cq_num = max_queues->max_cq_num; 17112fca2a98SMichal Krawczyk io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 1712ea93d37eSRafal Kozik } 171301bd6877SRafal Kozik 17142fca2a98SMichal Krawczyk /* In case of LLQ use the llq number in the get feature cmd */ 17152fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 17162fca2a98SMichal Krawczyk io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 17172fca2a98SMichal Krawczyk 17182fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(rte_lcore_count(), ENA_MAX_NUM_IO_QUEUES); 17192fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(io_queue_num, io_rx_num); 17202fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num); 17212fca2a98SMichal Krawczyk io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num); 172201bd6877SRafal Kozik 172301bd6877SRafal Kozik if (unlikely(io_queue_num == 0)) { 172401bd6877SRafal Kozik RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n"); 172501bd6877SRafal Kozik return -EFAULT; 172601bd6877SRafal Kozik } 172701bd6877SRafal Kozik 172801bd6877SRafal Kozik return io_queue_num; 172901bd6877SRafal Kozik } 173001bd6877SRafal Kozik 17311173fca2SJan Medala static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 17321173fca2SJan Medala { 1733ea93d37eSRafal Kozik struct ena_calc_queue_size_ctx
calc_queue_ctx = { 0 }; 17341173fca2SJan Medala struct rte_pci_device *pci_dev; 1735eb0ef49dSMichal Krawczyk struct rte_intr_handle *intr_handle; 17361173fca2SJan Medala struct ena_adapter *adapter = 17371173fca2SJan Medala (struct ena_adapter *)(eth_dev->data->dev_private); 17381173fca2SJan Medala struct ena_com_dev *ena_dev = &adapter->ena_dev; 17391173fca2SJan Medala struct ena_com_dev_get_features_ctx get_feat_ctx; 17402fca2a98SMichal Krawczyk struct ena_llq_configurations llq_config; 17412fca2a98SMichal Krawczyk const char *queue_type_str; 1742ea93d37eSRafal Kozik int rc; 17431173fca2SJan Medala 17441173fca2SJan Medala static int adapters_found; 1745e859d2b8SRafal Kozik bool wd_state; 17461173fca2SJan Medala 17471173fca2SJan Medala memset(adapter, 0, sizeof(struct ena_adapter)); 17481173fca2SJan Medala ena_dev = &adapter->ena_dev; 17491173fca2SJan Medala 17501173fca2SJan Medala eth_dev->dev_ops = &ena_dev_ops; 17511173fca2SJan Medala eth_dev->rx_pkt_burst = &eth_ena_recv_pkts; 17521173fca2SJan Medala eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts; 1753b3fc5a1aSKonstantin Ananyev eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts; 17541173fca2SJan Medala adapter->rte_eth_dev_data = eth_dev->data; 17551173fca2SJan Medala adapter->rte_dev = eth_dev; 17561173fca2SJan Medala 17571173fca2SJan Medala if (rte_eal_process_type() != RTE_PROC_PRIMARY) 17581173fca2SJan Medala return 0; 17591173fca2SJan Medala 1760c0802544SFerruh Yigit pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 17611173fca2SJan Medala adapter->pdev = pci_dev; 17621173fca2SJan Medala 1763f2462150SFerruh Yigit PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 17641173fca2SJan Medala pci_dev->addr.domain, 17651173fca2SJan Medala pci_dev->addr.bus, 17661173fca2SJan Medala pci_dev->addr.devid, 17671173fca2SJan Medala pci_dev->addr.function); 17681173fca2SJan Medala 1769eb0ef49dSMichal Krawczyk intr_handle = &pci_dev->intr_handle; 1770eb0ef49dSMichal Krawczyk 17711173fca2SJan Medala adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 17721173fca2SJan Medala adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 17731173fca2SJan Medala 17741d339597SRafal Kozik if (!adapter->regs) { 1775f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 17761173fca2SJan Medala ENA_REGS_BAR); 17771d339597SRafal Kozik return -ENXIO; 17781d339597SRafal Kozik } 17791173fca2SJan Medala 17801173fca2SJan Medala ena_dev->reg_bar = adapter->regs; 17811173fca2SJan Medala ena_dev->dmadev = adapter->pdev; 17821173fca2SJan Medala 17831173fca2SJan Medala adapter->id_number = adapters_found; 17841173fca2SJan Medala 17851173fca2SJan Medala snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 17861173fca2SJan Medala adapter->id_number); 17871173fca2SJan Medala 17881173fca2SJan Medala /* device specific initialization routine */ 1789e859d2b8SRafal Kozik rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 17901173fca2SJan Medala if (rc) { 1791f2462150SFerruh Yigit PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1792241da076SRafal Kozik goto err; 17931173fca2SJan Medala } 1794e859d2b8SRafal Kozik adapter->wd_state = wd_state; 17951173fca2SJan Medala 17962fca2a98SMichal Krawczyk set_default_llq_configurations(&llq_config); 17972fca2a98SMichal Krawczyk rc = ena_set_queues_placement_policy(adapter, ena_dev, 17982fca2a98SMichal Krawczyk &get_feat_ctx.llq, &llq_config); 17992fca2a98SMichal Krawczyk if (unlikely(rc)) { 18002fca2a98SMichal Krawczyk PMD_INIT_LOG(CRIT, "Failed to set placement policy"); 18012fca2a98SMichal Krawczyk goto err_device_destroy;
18022fca2a98SMichal Krawczyk } 18032fca2a98SMichal Krawczyk 18042fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 18052fca2a98SMichal Krawczyk queue_type_str = "Regular"; 18062fca2a98SMichal Krawczyk else 18072fca2a98SMichal Krawczyk queue_type_str = "Low latency"; 18082fca2a98SMichal Krawczyk RTE_LOG(INFO, PMD, "Placement policy: %s\n", queue_type_str); 1809ea93d37eSRafal Kozik 1810ea93d37eSRafal Kozik calc_queue_ctx.ena_dev = ena_dev; 1811ea93d37eSRafal Kozik calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 181201bd6877SRafal Kozik adapter->num_queues = ena_calc_io_queue_num(ena_dev, 181301bd6877SRafal Kozik &get_feat_ctx); 18141173fca2SJan Medala 1815ea93d37eSRafal Kozik rc = ena_calc_queue_size(&calc_queue_ctx); 1816ea93d37eSRafal Kozik if (unlikely((rc != 0) || (adapter->num_queues <= 0))) { 1817241da076SRafal Kozik rc = -EFAULT; 1818241da076SRafal Kozik goto err_device_destroy; 1819241da076SRafal Kozik } 18201173fca2SJan Medala 1821ea93d37eSRafal Kozik adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 1822ea93d37eSRafal Kozik adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 18231173fca2SJan Medala 1824ea93d37eSRafal Kozik adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 1825ea93d37eSRafal Kozik adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 18262061fe41SRafal Kozik 18271173fca2SJan Medala /* prepare ring structures */ 18281173fca2SJan Medala ena_init_rings(adapter); 18291173fca2SJan Medala 1830372c1af5SJan Medala ena_config_debug_area(adapter); 1831372c1af5SJan Medala 18321173fca2SJan Medala /* Set max MTU for this device */ 18331173fca2SJan Medala adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 18341173fca2SJan Medala 183583277a7cSJakub Palider /* set device support for TSO */ 183683277a7cSJakub Palider adapter->tso4_supported = get_feat_ctx.offload.tx & 183783277a7cSJakub Palider ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; 183883277a7cSJakub Palider 18391173fca2SJan Medala /* Copy MAC address and point DPDK to it */ 18401173fca2SJan Medala eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; 18411173fca2SJan Medala ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, 18421173fca2SJan Medala (struct ether_addr *)adapter->mac_addr); 18431173fca2SJan Medala 184415febafdSThomas Monjalon /* 184515febafdSThomas Monjalon * Pass the information to the rte_eth_dev_close() that it should also 184615febafdSThomas Monjalon * release the private port resources. 
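 * (With RTE_ETH_DEV_CLOSE_REMOVE set, rte_eth_dev_close() also frees
 * the private port data, so the application does not need a separate
 * detach step for this port.)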
184715febafdSThomas Monjalon */ 184815febafdSThomas Monjalon eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 184915febafdSThomas Monjalon 18501173fca2SJan Medala adapter->drv_stats = rte_zmalloc("adapter stats", 18511173fca2SJan Medala sizeof(*adapter->drv_stats), 18521173fca2SJan Medala RTE_CACHE_LINE_SIZE); 18531173fca2SJan Medala if (!adapter->drv_stats) { 18541173fca2SJan Medala RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); 1855241da076SRafal Kozik rc = -ENOMEM; 1856241da076SRafal Kozik goto err_delete_debug_area; 18571173fca2SJan Medala } 18581173fca2SJan Medala 1859eb0ef49dSMichal Krawczyk rte_intr_callback_register(intr_handle, 1860eb0ef49dSMichal Krawczyk ena_interrupt_handler_rte, 1861eb0ef49dSMichal Krawczyk adapter); 1862eb0ef49dSMichal Krawczyk rte_intr_enable(intr_handle); 1863eb0ef49dSMichal Krawczyk ena_com_set_admin_polling_mode(ena_dev, false); 1864ca148440SMichal Krawczyk ena_com_admin_aenq_enable(ena_dev); 1865eb0ef49dSMichal Krawczyk 1866d9b8b106SMichal Krawczyk if (adapters_found == 0) 1867d9b8b106SMichal Krawczyk rte_timer_subsystem_init(); 1868d9b8b106SMichal Krawczyk rte_timer_init(&adapter->timer_wd); 1869d9b8b106SMichal Krawczyk 18701173fca2SJan Medala adapters_found++; 18711173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_INIT; 18721173fca2SJan Medala 18731173fca2SJan Medala return 0; 1874241da076SRafal Kozik 1875241da076SRafal Kozik err_delete_debug_area: 1876241da076SRafal Kozik ena_com_delete_debug_area(ena_dev); 1877241da076SRafal Kozik 1878241da076SRafal Kozik err_device_destroy: 1879241da076SRafal Kozik ena_com_delete_host_info(ena_dev); 1880241da076SRafal Kozik ena_com_admin_destroy(ena_dev); 1881241da076SRafal Kozik 1882241da076SRafal Kozik err: 1883241da076SRafal Kozik return rc; 18841173fca2SJan Medala } 18851173fca2SJan Medala 1886eb0ef49dSMichal Krawczyk static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1887eb0ef49dSMichal Krawczyk { 1888eb0ef49dSMichal Krawczyk struct ena_adapter *adapter = 1889eb0ef49dSMichal Krawczyk (struct ena_adapter *)(eth_dev->data->dev_private); 1890eb0ef49dSMichal Krawczyk 1891eb0ef49dSMichal Krawczyk if (rte_eal_process_type() != RTE_PROC_PRIMARY) 18928f62ec38SQi Zhang return 0; 1893eb0ef49dSMichal Krawczyk 1894eb0ef49dSMichal Krawczyk if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1895eb0ef49dSMichal Krawczyk ena_close(eth_dev); 1896eb0ef49dSMichal Krawczyk 1897eb0ef49dSMichal Krawczyk eth_dev->dev_ops = NULL; 1898eb0ef49dSMichal Krawczyk eth_dev->rx_pkt_burst = NULL; 1899eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_burst = NULL; 1900eb0ef49dSMichal Krawczyk eth_dev->tx_pkt_prepare = NULL; 1901eb0ef49dSMichal Krawczyk 1902eb0ef49dSMichal Krawczyk adapter->state = ENA_ADAPTER_STATE_FREE; 1903eb0ef49dSMichal Krawczyk 1904eb0ef49dSMichal Krawczyk return 0; 1905eb0ef49dSMichal Krawczyk } 1906eb0ef49dSMichal Krawczyk 19071173fca2SJan Medala static int ena_dev_configure(struct rte_eth_dev *dev) 19081173fca2SJan Medala { 19091173fca2SJan Medala struct ena_adapter *adapter = 19101173fca2SJan Medala (struct ena_adapter *)(dev->data->dev_private); 19117369f88fSRafal Kozik 19121173fca2SJan Medala adapter->state = ENA_ADAPTER_STATE_CONFIG; 19131173fca2SJan Medala 1914a4996bd8SWei Dai adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1915a4996bd8SWei Dai adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 19161173fca2SJan Medala return 0; 19171173fca2SJan Medala } 19181173fca2SJan Medala 19191173fca2SJan Medala static void ena_init_rings(struct ena_adapter *adapter) 
19201173fca2SJan Medala { 19211173fca2SJan Medala int i; 19221173fca2SJan Medala 19231173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 19241173fca2SJan Medala struct ena_ring *ring = &adapter->tx_ring[i]; 19251173fca2SJan Medala 19261173fca2SJan Medala ring->configured = 0; 19271173fca2SJan Medala ring->type = ENA_RING_TYPE_TX; 19281173fca2SJan Medala ring->adapter = adapter; 19291173fca2SJan Medala ring->id = i; 19301173fca2SJan Medala ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 19311173fca2SJan Medala ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 19322061fe41SRafal Kozik ring->sgl_size = adapter->max_tx_sgl_size; 19331173fca2SJan Medala } 19341173fca2SJan Medala 19351173fca2SJan Medala for (i = 0; i < adapter->num_queues; i++) { 19361173fca2SJan Medala struct ena_ring *ring = &adapter->rx_ring[i]; 19371173fca2SJan Medala 19381173fca2SJan Medala ring->configured = 0; 19391173fca2SJan Medala ring->type = ENA_RING_TYPE_RX; 19401173fca2SJan Medala ring->adapter = adapter; 19411173fca2SJan Medala ring->id = i; 1942ea93d37eSRafal Kozik ring->sgl_size = adapter->max_rx_sgl_size; 19431173fca2SJan Medala } 19441173fca2SJan Medala } 19451173fca2SJan Medala 19461173fca2SJan Medala static void ena_infos_get(struct rte_eth_dev *dev, 19471173fca2SJan Medala struct rte_eth_dev_info *dev_info) 19481173fca2SJan Medala { 19491173fca2SJan Medala struct ena_adapter *adapter; 19501173fca2SJan Medala struct ena_com_dev *ena_dev; 19511173fca2SJan Medala struct ena_com_dev_get_features_ctx feat; 195256b8b9b7SRafal Kozik uint64_t rx_feat = 0, tx_feat = 0; 19531173fca2SJan Medala int rc = 0; 19541173fca2SJan Medala 19551173fca2SJan Medala ena_assert_msg(dev->data != NULL, "Uninitialized device"); 19561173fca2SJan Medala ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); 19571173fca2SJan Medala adapter = (struct ena_adapter *)(dev->data->dev_private); 19581173fca2SJan Medala 19591173fca2SJan Medala ena_dev = &adapter->ena_dev; 19601173fca2SJan Medala ena_assert_msg(ena_dev != NULL, "Uninitialized device"); 19611173fca2SJan Medala 1962e274f573SMarc Sune dev_info->speed_capa = 1963e274f573SMarc Sune ETH_LINK_SPEED_1G | 1964e274f573SMarc Sune ETH_LINK_SPEED_2_5G | 1965e274f573SMarc Sune ETH_LINK_SPEED_5G | 1966e274f573SMarc Sune ETH_LINK_SPEED_10G | 1967e274f573SMarc Sune ETH_LINK_SPEED_25G | 1968e274f573SMarc Sune ETH_LINK_SPEED_40G | 1969b2feed01SThomas Monjalon ETH_LINK_SPEED_50G | 1970b2feed01SThomas Monjalon ETH_LINK_SPEED_100G; 1971e274f573SMarc Sune 19721173fca2SJan Medala /* Get supported features from HW */ 19731173fca2SJan Medala rc = ena_com_get_dev_attr_feat(ena_dev, &feat); 19741173fca2SJan Medala if (unlikely(rc)) { 19751173fca2SJan Medala RTE_LOG(ERR, PMD, 19761173fca2SJan Medala "Cannot get attribute for ena device rc= %d\n", rc); 19771173fca2SJan Medala return; 19781173fca2SJan Medala } 19791173fca2SJan Medala 19801173fca2SJan Medala /* Set Tx & Rx features available for device */ 19811173fca2SJan Medala if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 19821173fca2SJan Medala tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 19831173fca2SJan Medala 19841173fca2SJan Medala if (feat.offload.tx & 19851173fca2SJan Medala ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 19861173fca2SJan Medala tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 19871173fca2SJan Medala DEV_TX_OFFLOAD_UDP_CKSUM | 19881173fca2SJan Medala DEV_TX_OFFLOAD_TCP_CKSUM; 19891173fca2SJan Medala 19904eea092bSJakub Palider if (feat.offload.rx_supported & 
19911173fca2SJan Medala ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 19921173fca2SJan Medala rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 19931173fca2SJan Medala DEV_RX_OFFLOAD_UDP_CKSUM | 19941173fca2SJan Medala DEV_RX_OFFLOAD_TCP_CKSUM; 19951173fca2SJan Medala 1996a0a4ff40SRafal Kozik rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 1997a0a4ff40SRafal Kozik 19981173fca2SJan Medala /* Inform framework about available features */ 19991173fca2SJan Medala dev_info->rx_offload_capa = rx_feat; 20007369f88fSRafal Kozik dev_info->rx_queue_offload_capa = rx_feat; 20011173fca2SJan Medala dev_info->tx_offload_capa = tx_feat; 200256b8b9b7SRafal Kozik dev_info->tx_queue_offload_capa = tx_feat; 20031173fca2SJan Medala 20041173fca2SJan Medala dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 20051173fca2SJan Medala dev_info->max_rx_pktlen = adapter->max_mtu; 20061173fca2SJan Medala dev_info->max_mac_addrs = 1; 20071173fca2SJan Medala 20081173fca2SJan Medala dev_info->max_rx_queues = adapter->num_queues; 20091173fca2SJan Medala dev_info->max_tx_queues = adapter->num_queues; 20101173fca2SJan Medala dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 201156b8b9b7SRafal Kozik 201256b8b9b7SRafal Kozik adapter->tx_supported_offloads = tx_feat; 20137369f88fSRafal Kozik adapter->rx_supported_offloads = rx_feat; 201492680dc2SRafal Kozik 2015ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size; 201692680dc2SRafal Kozik dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2017ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2018ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 2019ea93d37eSRafal Kozik dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2020ea93d37eSRafal Kozik adapter->max_rx_sgl_size); 202192680dc2SRafal Kozik 2022ea93d37eSRafal Kozik dev_info->tx_desc_lim.nb_max = adapter->tx_ring_size; 202392680dc2SRafal Kozik dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 202492680dc2SRafal Kozik dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2025ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 202692680dc2SRafal Kozik dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2027ea93d37eSRafal Kozik adapter->max_tx_sgl_size); 20281173fca2SJan Medala } 20291173fca2SJan Medala 20301173fca2SJan Medala static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 20311173fca2SJan Medala uint16_t nb_pkts) 20321173fca2SJan Medala { 20331173fca2SJan Medala struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 20341173fca2SJan Medala unsigned int ring_size = rx_ring->ring_size; 20351173fca2SJan Medala unsigned int ring_mask = ring_size - 1; 20361173fca2SJan Medala uint16_t next_to_clean = rx_ring->next_to_clean; 20371daff526SJakub Palider uint16_t desc_in_use = 0; 2038c2034976SMichal Krawczyk uint16_t req_id; 20391173fca2SJan Medala unsigned int recv_idx = 0; 20401173fca2SJan Medala struct rte_mbuf *mbuf = NULL; 20411173fca2SJan Medala struct rte_mbuf *mbuf_head = NULL; 20421173fca2SJan Medala struct rte_mbuf *mbuf_prev = NULL; 20431173fca2SJan Medala struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; 20441173fca2SJan Medala unsigned int completed; 20451173fca2SJan Medala 20461173fca2SJan Medala struct ena_com_rx_ctx ena_rx_ctx; 20471173fca2SJan Medala int rc = 0; 20481173fca2SJan Medala 20491173fca2SJan Medala /* Check adapter state */ 20501173fca2SJan Medala if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 20511173fca2SJan Medala RTE_LOG(ALERT, PMD, 20521173fca2SJan Medala "Trying to receive pkts while 
device is NOT running\n"); 20531173fca2SJan Medala return 0; 20541173fca2SJan Medala } 20551173fca2SJan Medala 20561daff526SJakub Palider desc_in_use = rx_ring->next_to_use - next_to_clean; 20571173fca2SJan Medala if (unlikely(nb_pkts > desc_in_use)) 20581173fca2SJan Medala nb_pkts = desc_in_use; 20591173fca2SJan Medala 20601173fca2SJan Medala for (completed = 0; completed < nb_pkts; completed++) { 20611173fca2SJan Medala int segments = 0; 20621173fca2SJan Medala 2063ea93d37eSRafal Kozik ena_rx_ctx.max_bufs = rx_ring->sgl_size; 20641173fca2SJan Medala ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 20651173fca2SJan Medala ena_rx_ctx.descs = 0; 20661173fca2SJan Medala /* receive packet context */ 20671173fca2SJan Medala rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 20681173fca2SJan Medala rx_ring->ena_com_io_sq, 20691173fca2SJan Medala &ena_rx_ctx); 20701173fca2SJan Medala if (unlikely(rc)) { 20711173fca2SJan Medala RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); 20729b260dbfSRafal Kozik rx_ring->adapter->reset_reason = 20739b260dbfSRafal Kozik ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2074241da076SRafal Kozik rx_ring->adapter->trigger_reset = true; 20751173fca2SJan Medala return 0; 20761173fca2SJan Medala } 20771173fca2SJan Medala 20781173fca2SJan Medala if (unlikely(ena_rx_ctx.descs == 0)) 20791173fca2SJan Medala break; 20801173fca2SJan Medala 20811173fca2SJan Medala while (segments < ena_rx_ctx.descs) { 2082c2034976SMichal Krawczyk req_id = ena_rx_ctx.ena_bufs[segments].req_id; 2083c2034976SMichal Krawczyk rc = validate_rx_req_id(rx_ring, req_id); 2084c2034976SMichal Krawczyk if (unlikely(rc)) 2085c2034976SMichal Krawczyk break; 2086c2034976SMichal Krawczyk 2087c2034976SMichal Krawczyk mbuf = rx_buff_info[req_id]; 20881173fca2SJan Medala mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; 20891173fca2SJan Medala mbuf->data_off = RTE_PKTMBUF_HEADROOM; 20901173fca2SJan Medala mbuf->refcnt = 1; 20911173fca2SJan Medala mbuf->next = NULL; 20923d19e1abSRafal Kozik if (unlikely(segments == 0)) { 20931173fca2SJan Medala mbuf->nb_segs = ena_rx_ctx.descs; 20941173fca2SJan Medala mbuf->port = rx_ring->port_id; 20951173fca2SJan Medala mbuf->pkt_len = 0; 20961173fca2SJan Medala mbuf_head = mbuf; 20971173fca2SJan Medala } else { 20981173fca2SJan Medala /* for multi-segment pkts create mbuf chain */ 20991173fca2SJan Medala mbuf_prev->next = mbuf; 21001173fca2SJan Medala } 21011173fca2SJan Medala mbuf_head->pkt_len += mbuf->data_len; 21021173fca2SJan Medala 21031173fca2SJan Medala mbuf_prev = mbuf; 2104c2034976SMichal Krawczyk rx_ring->empty_rx_reqs[next_to_clean & ring_mask] = 2105c2034976SMichal Krawczyk req_id; 21061173fca2SJan Medala segments++; 21071daff526SJakub Palider next_to_clean++; 21081173fca2SJan Medala } 2109f00930d9SRafal Kozik if (unlikely(rc)) 2110f00930d9SRafal Kozik break; 21111173fca2SJan Medala 21121173fca2SJan Medala /* fill mbuf attributes if any */ 21131173fca2SJan Medala ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); 2114e5df9f33SStewart Allen mbuf_head->hash.rss = ena_rx_ctx.hash; 21151173fca2SJan Medala 21161173fca2SJan Medala /* pass to DPDK application head mbuf */ 21171173fca2SJan Medala rx_pkts[recv_idx] = mbuf_head; 21181173fca2SJan Medala recv_idx++; 21191173fca2SJan Medala } 21201173fca2SJan Medala 2121ec78af6bSMichal Krawczyk rx_ring->next_to_clean = next_to_clean; 2122ec78af6bSMichal Krawczyk 2123ec78af6bSMichal Krawczyk desc_in_use = desc_in_use - completed + 1; 21241173fca2SJan Medala /* Burst refill to save doorbells, memory barriers, const interval */ 21251daff526SJakub Palider 
if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) 21261daff526SJakub Palider ena_populate_rx_queue(rx_ring, ring_size - desc_in_use); 21271173fca2SJan Medala 21281173fca2SJan Medala return recv_idx; 21291173fca2SJan Medala } 21301173fca2SJan Medala 2131b3fc5a1aSKonstantin Ananyev static uint16_t 213283277a7cSJakub Palider eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2133b3fc5a1aSKonstantin Ananyev uint16_t nb_pkts) 2134b3fc5a1aSKonstantin Ananyev { 2135b3fc5a1aSKonstantin Ananyev int32_t ret; 2136b3fc5a1aSKonstantin Ananyev uint32_t i; 2137b3fc5a1aSKonstantin Ananyev struct rte_mbuf *m; 213883277a7cSJakub Palider struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 213983277a7cSJakub Palider struct ipv4_hdr *ip_hdr; 2140b3fc5a1aSKonstantin Ananyev uint64_t ol_flags; 214183277a7cSJakub Palider uint16_t frag_field; 214283277a7cSJakub Palider 2143b3fc5a1aSKonstantin Ananyev for (i = 0; i != nb_pkts; i++) { 2144b3fc5a1aSKonstantin Ananyev m = tx_pkts[i]; 2145b3fc5a1aSKonstantin Ananyev ol_flags = m->ol_flags; 2146b3fc5a1aSKonstantin Ananyev 2147bc5ef57dSMichal Krawczyk if (!(ol_flags & PKT_TX_IPV4)) 2148bc5ef57dSMichal Krawczyk continue; 2149bc5ef57dSMichal Krawczyk 2150bc5ef57dSMichal Krawczyk /* If the L2 header length was not specified, assume it is the 2151bc5ef57dSMichal Krawczyk * length of the Ethernet header. 2152bc5ef57dSMichal Krawczyk */ 2153bc5ef57dSMichal Krawczyk if (unlikely(m->l2_len == 0)) 2154bc5ef57dSMichal Krawczyk m->l2_len = sizeof(struct ether_hdr); 2155bc5ef57dSMichal Krawczyk 2156bc5ef57dSMichal Krawczyk ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, 2157bc5ef57dSMichal Krawczyk m->l2_len); 2158bc5ef57dSMichal Krawczyk frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2159bc5ef57dSMichal Krawczyk 2160bc5ef57dSMichal Krawczyk if ((frag_field & IPV4_HDR_DF_FLAG) != 0) { 2161bc5ef57dSMichal Krawczyk m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2162bc5ef57dSMichal Krawczyk 2163bc5ef57dSMichal Krawczyk /* If IPv4 header has DF flag enabled and TSO support is 2164bc5ef57dSMichal Krawczyk * disabled, partial checksum should not be calculated. 2165bc5ef57dSMichal Krawczyk */ 2166bc5ef57dSMichal Krawczyk if (!tx_ring->adapter->tso4_supported) 2167bc5ef57dSMichal Krawczyk continue; 2168bc5ef57dSMichal Krawczyk } 2169bc5ef57dSMichal Krawczyk 2170b3fc5a1aSKonstantin Ananyev if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 || 2171b3fc5a1aSKonstantin Ananyev (ol_flags & PKT_TX_L4_MASK) == 2172b3fc5a1aSKonstantin Ananyev PKT_TX_SCTP_CKSUM) { 2173b3fc5a1aSKonstantin Ananyev rte_errno = ENOTSUP; 2174b3fc5a1aSKonstantin Ananyev return i; 2175b3fc5a1aSKonstantin Ananyev } 2176b3fc5a1aSKonstantin Ananyev 2177b3fc5a1aSKonstantin Ananyev #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2178b3fc5a1aSKonstantin Ananyev ret = rte_validate_tx_offload(m); 2179b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2180b3fc5a1aSKonstantin Ananyev rte_errno = -ret; 2181b3fc5a1aSKonstantin Ananyev return i; 2182b3fc5a1aSKonstantin Ananyev } 2183b3fc5a1aSKonstantin Ananyev #endif 218483277a7cSJakub Palider 218583277a7cSJakub Palider /* In case we are supposed to TSO and have DF not set (DF=0), 218683277a7cSJakub Palider * the hardware must be provided with a partial checksum; otherwise 218783277a7cSJakub Palider * it will take care of the necessary calculations.
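 * (Hence PKT_TX_TCP_SEG is masked out of ol_flags in the call below, so
 * rte_net_intel_cksum_flags_prepare() fills the checksum fields as for
 * plain L4 checksum offload rather than for TSO.)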
218883277a7cSJakub Palider */ 218983277a7cSJakub Palider 2190b3fc5a1aSKonstantin Ananyev ret = rte_net_intel_cksum_flags_prepare(m, 2191b3fc5a1aSKonstantin Ananyev ol_flags & ~PKT_TX_TCP_SEG); 2192b3fc5a1aSKonstantin Ananyev if (ret != 0) { 2193b3fc5a1aSKonstantin Ananyev rte_errno = -ret; 2194b3fc5a1aSKonstantin Ananyev return i; 2195b3fc5a1aSKonstantin Ananyev } 2196b3fc5a1aSKonstantin Ananyev } 2197b3fc5a1aSKonstantin Ananyev 2198b3fc5a1aSKonstantin Ananyev return i; 2199b3fc5a1aSKonstantin Ananyev } 2200b3fc5a1aSKonstantin Ananyev 2201f01f060cSRafal Kozik static void ena_update_hints(struct ena_adapter *adapter, 2202f01f060cSRafal Kozik struct ena_admin_ena_hw_hints *hints) 2203f01f060cSRafal Kozik { 2204f01f060cSRafal Kozik if (hints->admin_completion_tx_timeout) 2205f01f060cSRafal Kozik adapter->ena_dev.admin_queue.completion_timeout = 2206f01f060cSRafal Kozik hints->admin_completion_tx_timeout * 1000; 2207f01f060cSRafal Kozik 2208f01f060cSRafal Kozik if (hints->mmio_read_timeout) 2209f01f060cSRafal Kozik /* convert to usec */ 2210f01f060cSRafal Kozik adapter->ena_dev.mmio_read.reg_read_to = 2211f01f060cSRafal Kozik hints->mmio_read_timeout * 1000; 2212d9b8b106SMichal Krawczyk 2213d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout) { 2214d9b8b106SMichal Krawczyk if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2215d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2216d9b8b106SMichal Krawczyk else 2217d9b8b106SMichal Krawczyk /* Convert msecs to ticks */ 2218d9b8b106SMichal Krawczyk adapter->keep_alive_timeout = 2219d9b8b106SMichal Krawczyk (hints->driver_watchdog_timeout * 2220d9b8b106SMichal Krawczyk rte_get_timer_hz()) / 1000; 2221d9b8b106SMichal Krawczyk } 2222f01f060cSRafal Kozik } 2223f01f060cSRafal Kozik 22242061fe41SRafal Kozik static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, 22252061fe41SRafal Kozik struct rte_mbuf *mbuf) 22262061fe41SRafal Kozik { 22272fca2a98SMichal Krawczyk struct ena_com_dev *ena_dev; 22282fca2a98SMichal Krawczyk int num_segments, header_len, rc; 22292061fe41SRafal Kozik 22302fca2a98SMichal Krawczyk ena_dev = &tx_ring->adapter->ena_dev; 22312061fe41SRafal Kozik num_segments = mbuf->nb_segs; 22322fca2a98SMichal Krawczyk header_len = mbuf->data_len; 22332061fe41SRafal Kozik 22342061fe41SRafal Kozik if (likely(num_segments < tx_ring->sgl_size)) 22352061fe41SRafal Kozik return 0; 22362061fe41SRafal Kozik 22372fca2a98SMichal Krawczyk if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 22382fca2a98SMichal Krawczyk (num_segments == tx_ring->sgl_size) && 22392fca2a98SMichal Krawczyk (header_len < tx_ring->tx_max_header_size)) 22402fca2a98SMichal Krawczyk return 0; 22412fca2a98SMichal Krawczyk 22422061fe41SRafal Kozik rc = rte_pktmbuf_linearize(mbuf); 22432061fe41SRafal Kozik if (unlikely(rc)) 22442061fe41SRafal Kozik RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n"); 22452061fe41SRafal Kozik 22462061fe41SRafal Kozik return rc; 22472061fe41SRafal Kozik } 22482061fe41SRafal Kozik 22491173fca2SJan Medala static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 22501173fca2SJan Medala uint16_t nb_pkts) 22511173fca2SJan Medala { 22521173fca2SJan Medala struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 22531daff526SJakub Palider uint16_t next_to_use = tx_ring->next_to_use; 22541daff526SJakub Palider uint16_t next_to_clean = tx_ring->next_to_clean; 22551173fca2SJan Medala struct rte_mbuf *mbuf; 22562fca2a98SMichal Krawczyk uint16_t seg_len; 22571173fca2SJan

static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	uint16_t next_to_use = tx_ring->next_to_use;
	uint16_t next_to_clean = tx_ring->next_to_clean;
	struct rte_mbuf *mbuf;
	uint16_t seg_len;
	unsigned int ring_size = tx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_tx_buffer *tx_info;
	struct ena_com_buf *ebuf;
	uint16_t rc, req_id, total_tx_descs = 0;
	uint16_t sent_idx = 0, empty_tx_reqs;
	uint16_t push_len = 0;
	uint16_t delta = 0;
	int nb_hw_desc;

	/* Check adapter state */
	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to xmit pkts while device is NOT running\n");
		return 0;
	}

	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
	if (nb_pkts > empty_tx_reqs)
		nb_pkts = empty_tx_reqs;

	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
		mbuf = tx_pkts[sent_idx];

		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
		if (unlikely(rc))
			break;

		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
		tx_info = &tx_ring->tx_buffer_info[req_id];
		tx_info->mbuf = mbuf;
		tx_info->num_of_bufs = 0;
		ebuf = tx_info->bufs;

		/* Prepare TX context */
		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
		memset(&ena_tx_ctx.ena_meta, 0x0,
		       sizeof(struct ena_com_tx_meta));
		ena_tx_ctx.ena_bufs = ebuf;
		ena_tx_ctx.req_id = req_id;

		delta = 0;
		seg_len = mbuf->data_len;

		if (tx_ring->tx_mem_queue_type ==
				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			push_len = RTE_MIN(mbuf->pkt_len,
					   tx_ring->tx_max_header_size);
			ena_tx_ctx.header_len = push_len;

			if (likely(push_len <= seg_len)) {
				/* If the push header fits within the first
				 * segment, just point it at the first mbuf's
				 * data.
				 */
				ena_tx_ctx.push_header =
					rte_pktmbuf_mtod(mbuf, uint8_t *);
			} else {
				/* If the push header spans several segments,
				 * copy it into the intermediate buffer.
				 */
				rte_pktmbuf_read(mbuf, 0, push_len,
					tx_ring->push_buf_intermediate_buf);
				ena_tx_ctx.push_header =
					tx_ring->push_buf_intermediate_buf;
				delta = push_len - seg_len;
			}
		} /* there's no else as we take advantage of memset zeroing */
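
		/*
		 * Worked example with hypothetical numbers: for
		 * tx_max_header_size = 96, pkt_len = 200 and a first segment
		 * of 64 bytes, push_len = 96 and delta = 32. The first
		 * segment is consumed entirely by the pushed header, and the
		 * first 32 bytes of the second segment are pushed as well,
		 * so DMA mapping of the second segment starts at offset
		 * delta below.
		 */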

		/* Set TX offloads flags, if applicable */
		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);

		if (unlikely(mbuf->ol_flags &
			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
			rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);

		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);

		/* Process the first segment taking into consideration the
		 * pushed header
		 */
		if (seg_len > push_len) {
			ebuf->paddr = mbuf->buf_iova +
				      mbuf->data_off +
				      push_len;
			ebuf->len = seg_len - push_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		while ((mbuf = mbuf->next) != NULL) {
			seg_len = mbuf->data_len;

			/* Skip mbufs if whole data is pushed as a header */
			if (unlikely(delta > seg_len)) {
				delta -= seg_len;
				continue;
			}

			ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
			ebuf->len = seg_len - delta;
			ebuf++;
			tx_info->num_of_bufs++;

			delta = 0;
		}

		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;

		if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
					       &ena_tx_ctx)) {
			RTE_LOG(DEBUG, PMD, "LLQ Tx max burst size of queue %d"
				" achieved, writing doorbell to send burst\n",
				tx_ring->id);
			rte_wmb();
			ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		}

		/* Prepare the packet's descriptors for the DMA engine */
		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
					&ena_tx_ctx, &nb_hw_desc);
		if (unlikely(rc))
			break;

		tx_info->tx_descs = nb_hw_desc;

		next_to_use++;
	}
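
	/*
	 * Note: the rte_wmb() below makes sure all descriptor and buffer
	 * stores are globally visible before the doorbell register write, so
	 * the device never observes the doorbell ahead of the descriptors it
	 * announces.
	 */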

	/* If there are ready packets to be xmitted... */
	if (sent_idx > 0) {
		/* ...let HW do its best :-) */
		rte_wmb();
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);

		tx_ring->next_to_use = next_to_use;
	}

	/* Clear completed packets */
	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		/* Get Tx info & store how many descs were processed */
		tx_info = &tx_ring->tx_buffer_info[req_id];
		total_tx_descs += tx_info->tx_descs;

		/* Free whole mbuf chain */
		mbuf = tx_info->mbuf;
		rte_pktmbuf_free(mbuf);
		tx_info->mbuf = NULL;

		/* Put the descriptor back in the ring for reuse */
		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
		next_to_clean++;

		/* If too many descs to clean, leave it for another run */
		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
			break;
	}

	if (total_tx_descs > 0) {
		/* Acknowledge completion of sent packets */
		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
		tx_ring->next_to_clean = next_to_clean;
	}

	return sent_idx;
}

/*********************************************************************
 *  PMD configuration
 *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_WC_ACTIVATE,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
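
/*
 * Note: the dynamic log types registered below can be tuned at runtime
 * without rebuilding, e.g. through the EAL --log-level option (the exact
 * match syntax depends on the DPDK version in use).
 */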

RTE_INIT(ena_init_log)
{
	ena_logtype_init = rte_log_register("pmd.net.ena.init");
	if (ena_logtype_init >= 0)
		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
	ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
	if (ena_logtype_driver >= 0)
		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev;
	struct ena_adapter *adapter;
	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
	uint32_t status;

	adapter = (struct ena_adapter *)adapter_data;
	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
	eth_dev = adapter->rte_dev;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		RTE_LOG(ERR, PMD, "Invalid AENQ notification syndrome %d\n",
			aenq_e->aenq_common_desc.syndrom);
	}
}

static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	adapter->timestamp_wd = rte_get_timer_cycles();
}
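
/*
 * Note: the timestamp recorded in ena_keep_alive() is presumably compared
 * elsewhere in the driver against adapter->keep_alive_timeout (updated from
 * device hints in ena_update_hints() above) to detect a stalled device.
 */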

/**
 * This handler will be called for unknown event groups or unimplemented
 * handlers.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	RTE_LOG(ERR, PMD, "Unknown event was received or event with "
		"unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};